kernlab/0000755000175100001440000000000012774406060011714 5ustar hornikuserskernlab/inst/0000755000175100001440000000000012643171236012670 5ustar hornikuserskernlab/inst/CITATION0000644000175100001440000000151012643171236014022 0ustar hornikuserscitHeader("To cite kernlab in publications use:") citEntry(entry="Article", title = "kernlab -- An {S4} Package for Kernel Methods in {R}", author = c(as.person("Alexandros Karatzoglou"), as.person("Alex Smola"), as.person("Kurt Hornik"), as.person("Achim Zeileis")), journal = "Journal of Statistical Software", year = "2004", volume = "11", number = "9", pages = "1--20", url = "http://www.jstatsoft.org/v11/i09/", textVersion = paste("Alexandros Karatzoglou, Alex Smola, Kurt Hornik, Achim Zeileis (2004).", "kernlab - An S4 Package for Kernel Methods in R.", "Journal of Statistical Software 11(9), 1-20.", "URL http://www.jstatsoft.org/v11/i09/") ) kernlab/inst/COPYRIGHTS0000644000175100001440000000056312376021447014313 0ustar hornikusersCOPYRIGHT STATUS ---------------- The R code in this package is Copyright (C) 2002 Alexandros Karatzoglou the C++ code in src/ is Copyright (C) 2002 Alexandros Karatzoglou and Chi-Jen Lin the fast string kernel code is Copyright (C) Choon Hui Theo, SVN Vishwanathan and Alexandros Karatzoglou MSufSort Version 2.2 is Copyright (C) 2005 Michael A Maniscalo kernlab/inst/doc/0000755000175100001440000000000012774400037013434 5ustar hornikuserskernlab/inst/doc/kernlab.R0000644000175100001440000001050512774400037015176 0ustar hornikusers### R code from vignette source 'kernlab.Rnw' ################################################### ### code chunk number 1: preliminaries ################################################### library(kernlab) options(width = 70) ################################################### ### code chunk number 2: rbf1 ################################################### ## create a RBF kernel function with sigma hyper-parameter 0.05 rbf <- rbfdot(sigma = 0.05) rbf ## create two 
random feature vectors x <- rnorm(10) y <- rnorm(10) ## compute dot product between x,y rbf(x, y) ################################################### ### code chunk number 3: kernelMatrix ################################################### ## create a RBF kernel function with sigma hyper-parameter 0.05 poly <- polydot(degree=2) ## create artificial data set x <- matrix(rnorm(60), 6, 10) y <- matrix(rnorm(40), 4, 10) ## compute kernel matrix kx <- kernelMatrix(poly, x) kxy <- kernelMatrix(poly, x, y) ################################################### ### code chunk number 4: ksvm ################################################### ## simple example using the promotergene data set data(promotergene) ## create test and training set tindex <- sample(1:dim(promotergene)[1],5) genetrain <- promotergene[-tindex, ] genetest <- promotergene[tindex,] ## train a support vector machine gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",kpar="automatic",C=60,cross=3,prob.model=TRUE) gene predict(gene, genetest) predict(gene, genetest, type="probabilities") ################################################### ### code chunk number 5: kernlab.Rnw:629-635 ################################################### set.seed(123) x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) ################################################### ### code chunk number 6: rvm ################################################### x <- seq(-20, 20, 0.5) y <- sin(x)/x + rnorm(81, sd = 0.03) y[41] <- 1 ################################################### ### code chunk number 7: rvm2 ################################################### rvmm <- rvm(x, y,kernel="rbfdot",kpar=list(sigma=0.1)) rvmm ytest <- predict(rvmm, x) ################################################### ### code chunk number 8: kernlab.Rnw:686-689 ################################################### plot(x, y, cex=0.5) lines(x, ytest, col = "red") 
points(x[RVindex(rvmm)],y[RVindex(rvmm)],pch=21) ################################################### ### code chunk number 9: ranking ################################################### data(spirals) ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,] ranked <- ranking(ran, 54, kernel = "rbfdot", kpar = list(sigma = 100), edgegraph = TRUE) ranked[54, 2] <- max(ranked[-54, 2]) c<-1:86 op <- par(mfrow = c(1, 2),pty="s") plot(ran) plot(ran, cex=c[ranked[,3]]/40) ################################################### ### code chunk number 10: onlearn ################################################### ## create toy data set x <- rbind(matrix(rnorm(90),,2),matrix(rnorm(90)+3,,2)) y <- matrix(c(rep(1,45),rep(-1,45)),,1) ## initialize onlearn object on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2),type="classification") ind <- sample(1:90,90) ## learn one data point at the time for(i in ind) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) sign(predict(on,x)) ################################################### ### code chunk number 11: kernlab.Rnw:894-897 ################################################### data(spirals) sc <- specc(spirals, centers=2) plot(spirals, pch=(23 - 2*sc)) ################################################### ### code chunk number 12: kpca ################################################### data(spam) train <- sample(1:dim(spam)[1],400) kpc <- kpca(~.,data=spam[train,-58],kernel="rbfdot",kpar=list(sigma=0.001),features=2) kpcv <- pcv(kpc) plot(rotated(kpc),col=as.integer(spam[train,58]),xlab="1st Principal Component",ylab="2nd Principal Component") ################################################### ### code chunk number 13: kfa ################################################### data(promotergene) f <- kfa(~.,data=promotergene,features=2,kernel="rbfdot",kpar=list(sigma=0.013)) plot(predict(f,promotergene),col=as.numeric(promotergene[,1]),xlab="1st Feature",ylab="2nd Feature") 
kernlab/inst/doc/kernlab.pdf0000644000175100001440000173717112774400037015566 0ustar hornikusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 4558 /Filter /FlateDecode /N 87 /First 705 >> stream x\[s6~?ok-wTnXdE+<:)請b%P=]\M5O8 btYOQ\>d1K_'uiݜAć,}Cr "DڤЉy./6QoUDHVa6*p=j%h &@xf^`Еu nC~9eT4)ΦI9QY4N~ThJ%.Qd"D >"oGՊVlN*nAa&6<%-}I cp[ܮ\mp.9&oam0y: qxaH;hw4xK wJ ]! 1W2?"w?X%dZ{guӍYrV^Ez񇃴Ί/ d؍G;0eb0Q6 IL]{әK4tA'MiьiK,T1 -SrӑgہGLRX 1C^oncٖmRhۺf6y!PVE|di⪒khj6>v; ij1ȹ y[&e{M=-`y_q=b1WrxZ2Q7 n\-T.qzvM/$缼HkO/q OKѸrB8o,'Y5PG_i/>xSiYi;LVZJ4儖>\_~ޗu֏Í(JvqѸڮqquG4~|Wd7ryך[ίn7o֏V VM߼~$rx.ޮmzw[el4EEָLo5-]  8A&7 ++_OWӲO.(;mIQOFENJ`!@@=rl ~-k,As2p˳DJ0u0CF{AKi|tF1XB>h5&=IM}pSh$Oz%`_E<߱I ]L5'"DXD"~%g2}XDEqUܽSj?Q> s?Zt\b%"~&+gƚetJd,"{D~w'Lߞ+ۓr=یZ_EkQތZ- ]29y+TAn`Ս;8x nEn)[[l9Y-Bcmw {W|uʗo_?>$UU^1k4[NF_~-oL=vշ5*-]MHb4 "P]sM}xcM<}7yR`b+<MRJ?;Ǹ#1TpsZN;cp%g?L` 76@IZooLaL Ċ+k g_}`gJ зZ[NWQR%tT'Yvu.]%Z ]ÿ)2[Cxq,N9WHǒZHS;$\Eyh@b1Z̪eU0+lsbéE D. cd`a^6!}01W%HDҚ\&n\+5Q-!$)i%Fu1+7 {\JNoGG# d)sఘi\M "k_[L2a}^=rmB2Ki)(s(ec ѕʔ',ڣ@-2lPw(Zr|b~p,%G_0\;=gP%[7^Ԋ"{er~Jo.4WEP5t )^]k03A:F$z&MFץbz?wzЕz: 2RN(5II NәH݂u ܆ "Bg}(UF~Ag`рaAxQS;l,Kd0~gRw ҄B;?f*MɔK,n¬~VEbŅ­˼cz\|t±x.8]}!~C?OSCх"*x4!CCjM nhsw9t?58m43X76[<O(=NOi,-ҋtNEe:Jii]$m6mOxץү_ES?_ 5r__c~{ {28h!F~_mvG9?b͛.' +^liQmi=. ې$}>O_7۞@҂aZTgy;LSc+?U%817)>K\-nE})(~!Zo@QD,1SĺubN]lv̪Ufn_/{~{ߍL(,IM ܟ6Fy7K7AtP6|T|Ips,-ۅAi?4H4#]f(q^>qi9& d4m?EK-ⳇExh)2 vDSvg_e?~~Va]i>փM'o"[I~r&;%ޔ=[}e3J ݊r.9 WY]+EnFYNە;''7/3[z̗_1ɃXamu.knjtP!o)fylMT.ȫ~C$IHN̼էe8z87ł?Yp*Uʯ%eJ`ECjI{N2cpG|5c0HL /T(['Ϭ!|zazwMpĨzHR2˫?\mrdk΢JYZ>lEٮߪL2̞#:n߯xz0jghbyUW GnbӪyүaMfӦc<³=ϻ por~~R|dn p&_? 
~p4.}dROEY!t9r&M}qY]֏M|xcr:UUɰlM!chZ҈J&=Ds:P$>B*FM@˧e^!ݰ-rH+ >l通M̪z}HD~F)cO*#6~ce>iɤ8Gmب?wcߗendstream endobj 89 0 obj << /Filter /FlateDecode /Length 3625 >> stream xZKsW0`qr*T>@ H" W!= H.W.9HcvdM"]elR*$_&6oǵqQNW߯3̭¹ iҿ׷YiW0ă ˸*2&_"h8ύz@uZ^l ]y%3U;CoixC2f$d1H!UilrC_wkf;Ħ^awgVHEYny=&}vD6eqC'RLwj{KiUq {M<#YMȄq/ͧYg֭Bu#nzsĩ:#耋'f/(t;Ŧ^=e~;|}xay3_s?.ԑ 0y|*~Ӫt>ɠ7qjGU.r'^:n~Iq*]UɪHMŪ9|}@;ʭiq<<`L@!`p0V`U5=nbJe+Ws&b)ЁH]:=t@YFC1aS[4{S~BpB,i RΖ8nN Ey@|RR .c{=-D=h(@6$'gf'U_L46ؘ}ABcM* i_ih& 0jCzjK*Le")S]fd!ao$A=/mu{)`nmвj2Ǚr>~E}݋8AGϖTQDPHr`aճbUp@*[7i&̍Î | hHin 6 UGUQ]*1;pFUPK=5#Bi '%KlNgGD=_Ot\*}9:gK' @wS>Qr[-uW_vքګ]9UV*a7B!<ȬL4]HOBySICxǻk:6.$4fk}r?ҁ1S4`&`NUkϒE͸Cy^]VrzG( ;\hlR m q<`'Y%e [. DXqBGipG"Wv]8Z{l xŔ)9c6T.); 1-vL›hOhó \h4?nZCpGNd:6/!,.TEs ASydDn2~^:VNjؒöA c tHj[#ODߥY$ D=e+V C7 ݪ߭oVߦK".\5] gC!|F !/sALhE@6,ٛ7WH^Ke&.#JxA$`mfKC:w ](cY1?tH~ɖ@-E E82QNp x9Q)]ӝr)A}sF'vGV[ ' B<)s:~RvTq2-+u-汬}7k;E5vW`U:&ylyW)U͹~iP㮊 އ0!\AF5o?\8WnyԘ?Բwᗏ0k'S/UnE.XiRVR?M0P2D6!/ o7zQ1vVK1/;u,vgI%vR6?jWun>DG}pAߜ)Q!>?(̙W2鰞7g魯6jɄ`/s S⿞KJ.MK}O'rˌ)Լ,H꼼hgO)?}kUpYw*`"n͐H+ ?oP%!X`&ŽO۝/FRǩJnVm.%@z.rQwTu)#jY'7GJ AzBTJ 2kx D.KhTsaWq |)ٱ[H8 <\kV2, RE3S*)  }2g3X}P2/m?zf{;;&z|i>\ M F=m^eX2AgT=?]5׎bendstream endobj 90 0 obj << /Type /ObjStm /Length 4053 /Filter /FlateDecode /N 87 /First 796 >> stream x\[sF~_O[P)w$eLl amf)Jmf44oZ1`j&,ab2M3p3, 7Dz7dBmR&,˘ԩFe¤MPH&EihcJ$&OSҡGQmR¨IƔM7Tj)ӉG/ JaZ2m 8^̀l2QȘQd-5(ɌEW3KISHzZE*ǬMg6LRf3t;R9M|8g /shh\F j<-5$iv⭢ƞyh(KIB#6L֠Q, jӌz60ʴ\x& [QsIbHԖThtO&]F!8#qR%Q;Go3##I2䦩#[6v " DFbѵNDdkI5٣tA&d.#i$ca[a$cY"ÓyM8Oz,%xS;U=.۽fhg){Mݴ([TE*ʹ|}kt(-g-U7-Dξ{Uݗ-uޗøL/>,"Ǵ0j2ۯfIk+9WF&l_24k0Y(PB}~hx -eBn,;SB (Cۦ8({;w1|ӳ+!]{tYFbh~8oq`nm S=c#`9. 
'E2 x mog&#_nruϞ>{>&?883;sC\Of(PHEYʺ 3٧+aPnٻFQ%U:z:@&~h4460XlB9<܇pN Z!ܤ%91<}H|]=yJ)nר3-Pk>%,mZᩤeyb(r/HJ9/iK(%zLC٠Kf9&d(i 3kě4=^\K%LJsDA)r9dX*g~jPP,n{?EPM{tR{pEY.z`eCIzɕ`4#ԯ:Ȕ ciHVT0ĥgv@ \C.l]&.]05g"y#?`N݀:p4Vs8]8&qY%"4HM Vh'ܝY=;6lfo7zEqrbcl6ڤ@ӆg|*BsR"(xÄ[ik^tU%_w˺t' ICA ȰbUϏ/+ŻWX\,`tbVL$_tG,QD*a D+R"_-XY}l̍<dpw#71E3[lʋrЌ25),͝]ܗbf'ږjgڴl9i)/eլ{eL%M'g둕6})EdbҎgϝ&e/iM@.xבiU'/@d h |Ewu -Ș?gwTq[)78:gBڞqD$[$@d\ :" Xiv.@={Q?_sR**u1Kz=wmsk]G4{^yc^}v|^/J[nXw`jJ"`kR/jg_Zh˼#\;3_VFVaׯDfCh -O;i~&Ȋ2n{{Eg.ɢ4m3yN^ cJ'IXZs+OQ<>;+6?'+@pgWxJijPʲ|^k}C&IUW]_mwHreJϿ)H ODw^n;j!-*&$#$c'N5ϻśf]o_TۏwdN7 Xgr-LOyhrP&nM:;cLlo6mvvgl߻8od߾}|>> 48䣩X) Y7}=@{58vQ抁 AFno 9-ծH~Q3@5)d[ W [ p i>=ZGYXBZ +^o _ h9I,*B. "K\ӏ^Q}0|.r:@mC|r v+m4U:9T"uJ? )H=jA^SO0" ] mh* *ck bz. rܐ })Ã]r냧շ?{飰>xڍ8bh]}c_76ƾW} FK y-r^0)ϲ~G[fj"? Z.^Μ\s< xrdYMxpU'~OW欣vHS"R+p(ՠDNO#T,mCAo=eg*/"uw.Eq|^//$'z@4łwR4}RhA8oyZMU&xT0//I CEӚY::AY-N !0Vop%妙B$tw%|#b82I`)}> stream xY]o}ﯘ,8bqa7 nSZ%F\rKr8rIyQL;1áJH?% j]0 [a g*jk*f% ̈́ pSA oZ`D VpNd?x sA M$ (q2t2n Iptq e4|J0sVz4ZAs+hvHhU!qhN3ψ5?З{XҽPk{ 4k50pY 5~# $-iڒ_"# HzP !(#{#|F4!"anlߋ͛zXu"onn:˷Xu}fӊE[aݦj2wźxZ\(Z]bqQj@6cq{(zfHĵ?1kDT> S:)cC.52EM2Y I&)g"5d@T:5r魗E)*H*rHD %%yיtyD  Xdd2ig˦H4IX%HQLYH1Ts.fW` uʠX =R靤uɸD]R_F×)?ά *t%0~)9%Z3hgK,DTbYVخLtLLt%|؉I˶z.aUK՚Dbtj +TƖcN/DcU<&|tz:hIT\hO,#R)hlas K>oS+_ƶC#WjN^@G+Rg<\ۉadKقQ Gg0 . NIsb3lb\hk~.%Vx~dFy\G[W]!cɇb7 |S]wUE_~S ia_Fͪ?% ?uMhԯ~}uWԕ}lXMVSΤ,hl,UЏ3%oUwOC00oO)goj]Zeݶ:|w9t,nO缉tLTAޣ Ih(ˠ w=1=#P D3>2xck"Ķױmc;.:DhǸ,/%!vvmt)|PT{BHXƧZGVf)PƮͶ]뇺qinn.}): <yXT d#a뼪bnXy_⥸SV\`H1o*@{ vYl!P9]l科~f ܺ M:;˧w;TTdqD . 
WOXۺ:џvg5Pù.$^]vE|4R/&PYt7v4ea!66+h[{ƃE5L 7&Yǵsu:O>AT UYI)ę=?<{?]l2xW@I_NxB-U#rtbӐDvb"/aC,[z]; :IfKl.}_ɣ- QsTyf9CҮv]Wr/l"MHF]i_arU'>ƴLz-B"LW_m~)[t!Jg,MS Mue]lnc1g9ͷ(=LH_NaF4W ?>%-!kGlj8L}b lc3W\)~܍gJ~XkGm9gƁGl G#;ܰW|)D`oƳͻhXjil6lhxF*ejɎu;8}_Zc(bLR>O};ԛѶv|nv~y/endstream endobj 266 0 obj << /Type /ObjStm /Length 4544 /Filter /FlateDecode /N 87 /First 799 >> stream x\[s6~_v:%;L46i-367}sʔ,۲GHrsA+狪P.2P rYxE*bE.d< jl!5F)|!(SGnd* % h#eI\j騕+TtH/tuBVWkz^Vh%f*B{>BGCLa*TTC1F_ˏBaV0jLVda*FYk(SX/-l D@" pkWh%e"Rh{x h .P+_`0 E TEJeA+C Vq(UvE$z5VX`0 bDjhLSV\+ 9J$֖JD0!"Jq/ńi E 'q^ 3T Y@78 4KP* pj DHl#FHOEuePa`Õ ˿v`69MiWBƳ˳zA5/ݧhvџmN^n\qj8^;-+`c><(>]Skq}Ӧ]NxNndzf3Fl~sQ/X|!B<)ċfDGUx΅22҃.T%_T?mzM^dtORSu#2r>Jٻ/%ޱ_yv[fg6_v^bmr]O"C^7#f mgnjiI/O]ɾh|,؇[G~^4ʫG?xJ_3Jq.}>}}?OwZO<|g3Ư}E6iһd/QBeY8ߕo @2me:^/Pyvpu"p#7x nngb9y^7 qiSʔL̾AoNO@#PTR9@Rj?! :QB)nj}SmR)ڭu {'wBJ 1o,͘(PMߊB%BQ4CcBɻS19@҅uKE)2u@DȻb |,u2FZI Gog<IPZfꞏ#R."}PӡB=O n )ʀ/lw2.QalޞWz?rsssӰ>22}?iBk=yyH޴@`Dߵ4SzfxS9OZ竍 7Y\-uR%lbp*+ڟ8"<q.k'Dك<89_ *zn qL+|,w0'f4wEڛEm\SwDavz4bjzE/%xeO #ޗ5`%M]Z詳XO܊n eNR`ޟHSdDz{XNF˓{lވn>G8Ѵw2 ~1߃<}魔br {)vqFUĮ}=!3Ly"Q>p99Oq”2dn'v9P9P?S?S?%{~dX ITΙCw[;*kS`A=|]^3* *FB>P} ZmI-9ךDV)ʩ09t0X)M_ V#nICE]LbBsu9"W!;~{5XJv6ߒ6eW]U )H^JU3 ]-C5Y m:ÏGF?$BCHB Va>IaGzhO!ZNS]XD[嶆^$,e[3ޔZw (Oe58H$;\Dr @ ޜA4}^e0S'F*Ѵ'D=;~X`ӷsTےLz\qo֯z8/٭ htW7BkCފ!MGzq*M9+f4 w* Br_azJ "oӨ*pb&G_˩}HOɟEl 9}g{\ӳGXdQ$_<: ,ݽrn?ކ4nVK[\ "RiSUt@dGgsӱ'K++.^OM)I!.Xb(%Xl|@JgD8>/k"-4.f'uYVB6$.p(ZƠ xc\H ,.FHM53(`%/Z{!We똊CNSçt"z] Mc ;F99[ ⟳ŌTOCk@gUnG`/-q(jr#;o;%hmu^sCYrpJ8GcH0CJm[hWT8tqZi+3q4Gnܼ''j3ژ\=^]j_Wώ]ļ>nG*.IM/g]>Ls%ɢ_O_*_tb _Q!qz\IWѽ jeL^ |q d\/6;x{87x$SL|/~ R?ZZHfD7c\59[T zV̛kXq-b!&b*KqFz|x]5vv,Z1 hOw'Sn)>c+~"/iZ[ ȧM:f/<)M=;@' nÄ9x 0Fk\/Nvs G~UiV}[3o% b#CJQQ5X̕(KAߦ s]Bnfo/XCJd6q /i1-DR,ZW\v.+ 0 8n.AمQzpxMCwwɻ+bfA܆Nnޣ4rzɍmI:-:zu^bcGl ϗš_!e7\ Xa8 ؊vE/S,![JY?|SQSLH~Tϊ'S(ru4|(sendstream endobj 354 0 obj << /Filter /FlateDecode /Length 4413 >> stream xڭ[Y۸~P% C9fؕĞTj7#i-JfN@4Җ@wP2YNɟnR0aR&[Un1ajn}o<t`Ƹi}-rYV+Ki:}:I&S~G1Pʙ@ߑj~c,jYn@iF3}‘73arZF))(~8M"BKtRL9/ 
̍}hx̲iÊ3<#1-Y#s=gSmQ_[>ma#y~ ^^,.m%lej|/і;hyJZq8A@iP8G ie$Zz›i\tXM_-%v"Q rQIB?i3IՊ39e ӣI!uGC{ykZAAf -8fE2`4ޢܽo#E K_1u|;"Tyq[)>(M_τ9<#A5 (R75m㺠cٔ*!<2{~D> >`كГ&]>< q˞*+ ]Z0WL(=Rgi*idӏO{41?&:K^ª,!,(&P`4tzPZ/.tZ]Pq]<{d-nkV<  pPZ<r 5 Qm*RQ[iD$e5y7)`|'.т6s4_@V:@d 6Vy{Rfໟ݁hc ^lؕIuaxS Y:tki#um[u`wڞcx j^sH>-{.@\O8EXt@7y\`^ʰJPꤼp:Uu}syHF{r@!dK$I@#3dt)W:}ɡ$D2$F^Kve ] /eo04|"K3 !Y4 v6k~ T-=M2Rf򛕏"eg+0Qm;)Cy~u<>X{Lj=.ȴEO y©BDQ*I_J̨1X*iW E42g^p8E4kW$EVPhf p(FhƇ !+z0(d2G9Ѵj^;)yVٖ?%6 S5 ,)zֆU+_nBͫOj ^uiLn}aM :[aIY #GRҨ=P  2”bYd#=J&zzy@\P dH8o֧ 2΍* ǼB?fעmO4'.ɺy_T}#ޯe ߉M4R%/֧Z 5闼Hm}}^ɗQH;fKBUvmg` \C 6ʵ0!'0$diUqm L]qᒞYHٶTBǧ?.p7_x(Us&Q7<7:pcX 1ixxTƂ~LU$!QmrH4~a Ԭ0x٫I qa;{Ũ2:|XN;U 9"BƵ0WB-QQ_(J9S;+Uq:¥,ϱJ~{p:*لO˸tV H_Nfp>AݠDHlkU3AvQ&:|4B`qe(1=|0PFR`ȉq{w]!meX:oMıwR8mIZp搶{#! P/SeūbC-v68rYwN00@GEO1JnP~&=v׈*X; Uǖ{YJkۗ/)wf_;\@(+ۆ:Dz&>͂D e^SI`&{"abm-_oGp/C,Y_,%X/5Q/):wŗH|ء3?#NXۋSV+tE3?H=)S6+#[1΅6WqXA/>w1|dW6[9fVTBLHG 5$HFIT/%\ȩ} ĝ f1my֐_5vv((_{.MgsHos dӗ |FJw|RO|-3T`?vk0}5qED^q(8چ ,SLQi}5,ߋV_+9\U >\_X/uYz+8|eTjҾ;$FrJ տ@{)S.qюYqP\_{?GSvC3c- 9Ͷq \=nέ >8?޽WL0odv*|Q (UtZ||Vf&*d]Q;R~~,MaI:NJ!M4ߺfF9+[{{6d C=a~xZO}[T5b\ > UXנ.xciGq ]QR{@_|W$Օ˾żŝݯ +}t%y،0ejjNDq%=2Rߢ3۷WqŸB|ٷT]XP2jHზm6שFUb+eӕ"Cv|"l;k ^A%3g$29l:W.[E5_,o$P%y?'>qʖ yj"^/ŌJ)3~6ځj#>H-Y0`QױS~ OK*>zDn _KաӓD!" NWM=sDw 6Hg-y$ K`{#݁_!&E2^/>Epcendstream endobj 355 0 obj << /Filter /FlateDecode /Length 3842 >> stream xڭk #P/pV%zܥM |JAkkmlɵ\6HI۠a->8ËExW*4_?.*J(YU&[o?/?;%7[ٜVƘ? mqlw<'o\k+p]v?u@Фi' xˆ+ب,׻,_ָ~X6MeqmbؽsNk~H]ky8K X7-ahuw yb$\]o8䌄4Y@$T 픂AD^C7rrBG:#L@dGq-VIU k}ibe\ڰ4l'o2'$hS/c;pCl{]"qR"b:Wy'9(Rߍ-@053'GYc"> z.<>q1"8ћـ-=7= ].lO<_<&5$&! 
oLNv%QQw 쇽O)aULV.IUڶgDR ҁD r&6`aNBR*tyeJ,K]Š}#xAXC9Z 0 P J@tѷUAcwwԹF@d Â̼%:!k诪yfycfG>+h7alpj3̚$2E:Ǭo,g6JaQA,vX5[Fz<-(?mQ,tpy=ޭWY(%B@eC|NjZegEcR1|7T oF>zW:, #SgVQ`DsnC2#vV2*lRkPqqCt4H7L5dw|Uet{T: /2)HXEl VlbHFjbqgu =JXvrc/֍hAgBW OBW zk{n'=Êg 0s : 8̩QCzA"qtt4}N5VJhc--e𞧦2/!ホyTx!aW?礊]r6[Wo 9a5[, 3k8(it'TJRLhOe#G6l2͓& 6: wZz]~#I(v!j:MFf=<3v.An늬 L!쳖MV8RhdjO1bX$>9z:NWLl]RW.U; UN{ٯnb"“:t\2Q Nb #oK-[Y?- 9zI82M4^i0!jp3B?D={=u0e&!.Ulaa8!-*e".aM}|ع+9, ٬떯⊖߅SB 6N"bNbX ^gu'F]ٻiIMYݦ9x4b()8^~'| "4S , ;r@Dia헃dž:{~5+q3!KSDq1UQ9$ n>\cP@%kB,^æPEc}]Y݁'7p&}yVV&?9Az~{hQI!(n]_tbV)'{kRYƑm-Ȳ8iLf*U^/P%r]#IHq~pP٠HC=5s3X6F=; pDEwL]>AzՄ7 G5*z#*Ss .ڳ Ql.I[Q] .m [IssH̤ nz'C/YT.Y iGZm}uJ֦E2ptFy%IBuvX8M P@$ P8m;(M#!=CdCS޴>RG8ѐG0``=X=JHN] n6Ik v$F~{0x7T"mW5yYe6!,WAs"8頻(CNNL)͢pn=W1{|Q8c셋?}9Y{5 ]=I]~/<nW"K)cݎ=K4AV֝sv#8]˜|5Zբ\XyTC^>Ac r1-ʒ[rR&K\]bEBFeTKft,QqzzE899Iĺ e[AUS8!+0T80&LǤ0f"Pl%nTbKZA1Ƚ5q;y@䁓jKFT5"F ..TnVz&݉gmJ,U9Z[ 74ީzi系t*?ѢNͬ=)ʑy}#~BE| +-sF/PF#ZDp˺%{t Odkc'㩗0kRքdt||T.'w HwDlDyrp?[3k1Z;-toOt%%jk2({MW˯/b:[V ?Pz{OjX.jPQFQKk&rġ*В$vսlpH}VsrBʙUeɥs0 6;z#AW|q^Fo85Z+|g/|ރ +eFל++&3 8`J Y8\s=W]9@<\v2 }701ev.IfqpQЇqaxԷ)<,חQ&ҐG]^nԢG ?"eN#Jz|>I_Ԑ8e_h,֤Y4z~uk"s HA\N (Xu7{kIoA[o3 NE\&]>\%a?k#%t(ȹM8Eb"UO'A[%'6gb5?tmrA\gRZ?pܽnO&&!Ϣ|u(g @{l_2c%ݡv.j#yL7Bqb1g;$q.> stream xڍT[]ݭwwwKqwPKqhqRw+r}y[ޕof3; &9H] "`gbegDA@it@.`GHn2i۫@p r 8E [TX Wd)G'oky[08\- 5tܼ '+ޕZ vh\A. KAUƊLв%trx TP:2Vˀ8X98;haht;X@UVˍtqu|]!@W?3d%. Q"n^wYRG~`{uv",ݝش M^EȬAnvvv>^aýO }VEVd_W ` pxw{ _{?OƯeeSӑa$%,Nvkxvƿ*8X9u'c`w] F<oϭwBȟj?5 i^@u .诡tXou $!D, dvWk1cHǥ``g_{8\_Oun; _xh lnK]8O>>9lMiC6k?z`~6! /| i/|iy%o'׫W_ݿ5ȿ5u~ub jzK7N\yy ǿx0kf klb*}@.%_`z9}? 
y,-l:$HlJc߈?7!D~NQxe1ׁ4?8Uz$ EK|O'Gٝ]W {xP~}e׎3 )2FH BȈ}1s;]0\1 8;g7-N^"j"B2Z_tEEM|+a_)QU)PxZ@t,h}i*RjF*MHTdzPL]}o{O{0d%5P[h1 ք׉ag pTm[9Wn7*~zY 4:|l#݃ 7#?1,{{a'8ц%nRvU[Vem=y=A~ kCtaFCZ>~zD,Y?g]߱ת42Y˵% .B@OvTe]6c.# s)(70fAQUgރ(ET6#rO2{̟ y届<ثT30iA?QWHϿ9(M2Q@4{:I~jsO0]NB&y0H?UTzs<9}eO*1AHv5(!H)lSO^ۯC/'!b' b`'ى yJĭ)1UP,ɺFn_wNA>@3 >A\nAB)w+蠮Ce(xʘr&'3% N+_q2~7-aύ;굇VX^ex^T\J4x*:Kw5[)MB]FdS啢Q|.8>Pcs҅{ia'~b2/U?ء}rz!O8GQQANeJ\J̌x}ŏd)ڻ dKӗ*yʁHPёqaփ (jY< y2zl% vi7369~Wϑ1}bt׺Vkx0]"FpSC̉6Ou~BƊQGj)<+]cxra>;uCHC=J7#(N@bGl=1cC.g 1v۾r D뙢~ hD_՜L\К C1Gzn}P{Ps7,|m4=@!~XQH1_Wdwg4x}hҜIRwf^Q-tF?|;VDYƂJy4sv$ A7<^t+_n)&P|$&0ֱ`iLBJ0䮘O2obٔB?wF|ԏ\TM?w4)9 j߄( doqaȾg3m1T'Q_ [*{&tTgb +e[C-*i,Ұj6Lbİ9CO\^=+d/6H ;7䛄? aH&ǝuF<"|W|d>Un#0vU5ͣ􀷀Q^9[eSUY6f~|!jxqVϊj;DDzf_zҼIZBR,CwÓĥQp^6K"Z0'sBkzqx1" c. @% ֡/oS =.rs'R0xNsR#Ա;*n|Lv_N\l($>Qҕj yqH8Rj{:1!5ZV|Q@- ܸMRV hY0_c=ع]1zÄL#/ٿ9ƿG=ZW0!^K-QI*k"\3坐.r8L}{,nަ-X%WwK$gB`;qdXFAiQ=OEdrhwV/LRi-"p)7KbОe4~' .Ǘ/:;sͭqT2:N85Ji̧B Th|$k]E2xQ-&=K#m' MtpNc7`:hur%$Pf<_X*/78: !B̬̚QK*߀2i U:hwZjVAHJү)g~SS IJ&mJy$`GrzsÙ1_dg]6ƖՊ%'Fl|>ıh)kFxpi2j}Pt1y #Qã}`l32$DV3b{_Ïթ@Fdp=C4ֽ"7Zu_lTmV[֧MT.yg8M˼߾qÐ{4:'H/!?|q />:Hπ:55EIMzT6q >iERsMLZ-a;$ o{W꽭)=ؙm&*0w3KmZY2GhcsjΈ T`N>=@2~qʞvG$\gr4M[x[wM0il h/ ip)UshpsT, XV{njBnK:r  >?S}ˑ^rKǼG58bY/Ӟ9j#[<{<TN wZ)2E$QAqˠ,v>5 m5+(|KɷwI%ce$/a?VAmFAtKAl5@߭Dv{'ݞ>r@GP.P#\扜&:175r:4h-賖2'V kq"Kl(v"E%MїuSS {GuoMF2 c)J΂׀qҶɷ줶9\,vS ,$fvɅYjM|")7'.:ѓ.& $?mZ9ITlj8 jV_aKlM=vXSYAC@){-Hm'ņ~(b{/{a%8hS3JӘ7ݟIte ">A Ty'uNS\/rrf^_ P:ws'DbjCPALV3i2>8R&(5'_ $>OzHn5Dv> = [ 8p5ƙ9)%.xA BN!~-!d{_ɴc3œHIQD8?=/"Ԃ OV$qE<g:Kڗ GV_:O` Bݚ\F٢C*l~\TU5+eJӢae1c2r{`ģF45E5l+m=+ͫu O?Ҵ){݅mŦ>uG:%X(Fb7zsT+j˂[~ 4p$zgѲ^{C,MՍϚ h Fa+N0\G)QV"bI {6$Qs8#"V'{m†;dE PCI^$2ugKJ4Ts BO*g-8|hnѿde+A3dxɐo^>{1f{ҋp28S6,F6֥ľ/l4L"(B(Ŏ* zgֽ+2;IA[.)mS\fsi1qNn." 
Q;]j1#imB *S#f*G/оo0mW=~g&_;KD%j6r^HQIjeH/ nluD|(al`0y}в./ % ГRSC~4\U|s ٹ1`Qk "鄦_mu}r Ɲ3$k p+3W^O*px7kw'2naCo}b3kcB3CтCNyN51ˉ[e=jă⌱71Od,*Z-# y`kXZrv MGfܷy%|lMn:^Y3Y,o?PO_U`(R^cky_!OK8gYԄX޾˸V<|Dyƨr93yԂj8kEQ W[;dh|*^NŹ-f&KWN(GqU# zd4,]'|v1x/ZO=%7_l,x9ؐYe_\5F<{D._=2ZwCX+}.ô\8\nЕbu<+oK_m49(lO[},1aD#~{};T1aӯ4z7 6qh Uً'32.9@uQ<'ۅ^' mHJ|% FPHv\` }C4C2=1;4O&oDoz66Dk; _<UöE`>ȯ5}onE S[`^vuԒ+dKI+4 U`Rx,-1 jeINZMވ:lpOCUŦӛh7+]J vҺetH叐xҁ!Mcs UX>ϢטI2(oS/xm28 &PZ@>qVX4!%eR4mOaukw"g. $RaӘ"p)_Rkgiն4~7s xoXo:=5N2Ѕ CH_)j;Nt? i2eCZ|.4s֝y`RcHKX[3e"YH]/dIP<#u3hP x0?y()qu7kM,]w@)\hnf.& v;_fYu+ H0IMy%,1 $;@9N";v~.`aEIq1hܯϷtLsf ["n5?󱑽,K1!lLN}H? r^&ɢKzp%K+L2# 2 +EIW {G Ewk>XB9Q]̟]]gu惘bl[xH?CaK WH7Vxzxhxźnul[[!oHdNiPm Fnn缝28H>@o8]2ߦGh*}ОREFG6-67-Qdk=.0jeoSR;XF+o& vF>S3Cm MxgqiB\7㭗tKǺ.lUݼNnKRڈP e4r G Ss611;@|z=äUֺ E潵5Gz%}ŏcFg%5r7 riЊgcoj< re6 SgMZwZq|WeUUZ.cyrSTP=z4t,.v>;<`ٚңk@ '=JJ5;VUӖ7hy=Ga?ƨy'#gϱ*ʵd@~qP00_`5FVxV3܈k5n L(*ϝ;B8עG}% V>7˄eԮS>QuQH>* ֒ʶ#W"Yxd/h½UgnQٳ Zʺ4ẛb~2w560 QN$Q.E"&$rEoS׊Z6T8-(f7ߝho}} z*{J$&ŖI?YW\nλ: , ,ʁ)U4J&hq{ث;*~E;|1|_)-F39.ўbg|t aT4# kNM Sh%%\c#y2]ӢAϑz[ xqzxhgᕤƨفLgP- UF2 ;'|u.1_Ts*Js^ FDuTɝ眓^ݞ So^hR/u-٣ V%IVouC\:pttdKެ5>52Ntݯܵ j-}-8DfY,RjgBfL5%[B#7WA!)G *u2ͭHPϹೣReZ2x.I9CzWc 92`6L3kTu_&:+wIѻKA4?+ l˾-K6 Vz)X a%ukթf \δdN!wX2B5XU+܅29^ @4i{E.~󰲻p;[dc$jR#$Q%hCx2Gk2ps$Bn\8s_\Q#A>h֙~k\PߥRd#0Tiԃ *}3}B% 0}2_ \`NN2,H $zZ'M5IDqa!뚢x wub$tʤadYW'9ձӪiaC,#|tIfE҂WvӜ X7Os|r=@EqD%noe0CϞ z}4k|mo!hb^Pbv'\N+.m[63:Ӱ]sE~1cgT4X%)9%7 n{SWTm(k,>ڦw gՓscq6~WH vhr(Cߥ,)ѓ8ܟ=ߦr&m-~:&)PT9yofq[ȸsZ2ԴL*OLDC7iPMwÜ-uC0%gNQCL plKH+A߃y33ը1=#4Ph=Z%Ou%[s!'rmoio\ȡW۔t] }D|6 ;̊@Luj&e> 7Bc-г(Y*.zSm1gHE,{A~qlE,PԀJpRt@8`W{/rJ3Ż)k$`-UiEŹMF% !R\[uX]PZdl5Fe޼@Ԕ:ynN-ҖNJuj?"B2# /u1ۙo(n Ous9sa˸+I7 ߰kT$5Vtl$/Vfe9Zg8NMpR\N襤G K%+OE `xC''b}Wr"W -_zBsOP4o'mtr_V^>Y(GK#/^PE+tz-(X!YO +r8]"E* .$y{?qiY[2|\8S_<Y#+#bk6LZa^4l9ZP[\Ћ5e Y}s]v`z%BŪfΆ㜴"5"qƳENX_#Kʭ̶6ʬdseI#2iUđIq+a-Fת ^[L(||h.0]m$a/^͛YȢaӶb~-Æ"ڠaץ/$m?@r95Gy1c֧jz?Wlz{Ǐ? 
`*2B]Dn g~Zj~6DӟH a;:@ @^#=i8cᓧ6yHq닔⤋5K!@8۵tBaT3yNt]\F܃o[y&Őhi6:8I$VZucp*ke]g%vg_XrMWo3qqm't|G0%&(Gw޴+QJ W6|(H!ퟓd*b@ycʆ'P|'e] }c B p(& `Gh\Qv_5[ ã@iiQZWnjJ@YyoU` TXK&#p.m~),M| 6A/fstf"XpTpDUP;TS,ݗe >&e]q4Y>(E$iv:G]ZtZ̢o%#. TOhw|~J4ڙئ;C'WbW*t}Hx2@qB&Z]=Ҫ+V3FPȕÅseլ5`4#Rhύ^iR/FS3#vpnt~Eۢ"qa@kh0@:f-%*w%_%x3rUE&BM0$)x.4Ў*-ãPr%uzH#Gk;(w'gnʲeOܥmaܭ i R&UwCIݓr:S)z ( ETÍge?Mrp@Nk(,g/5smQo r -endstream endobj 357 0 obj << /Filter /FlateDecode /Length1 2133 /Length2 15118 /Length3 0 /Length 16396 >> stream xڍPZ Npиwh5n!8AΝ꽢 zm_}NCA lbkqb`adʉhY((TANV(ԁ [X: ebNr6ig+ C[  :"Pڹ;̝#ژE/w5dlh3t2Zg46NɎՕڑL r2(.@_ScDVؚ::+1xP(m6ۀXYdc0Y NnNC m ] AVF* !0|go~ ;'GFG_ fqQ[kk#_}wgZغxlLLalǤfwJ]Gftp033sqr9_ TR%~igk0} x:N@o*!L@N#Ow1o~ 76' 3rc#fҒWSU7*EDl lV 7q A-r]=^ƒ}\ Ϡ0s0bVawbW#C$̀ [ᤑ~qϣF'g8i졩#oD B{^/^m=Ο>(cܻJ5|+9W)\>_2OgK @G~2s;3F,G}V䩵V؋GKy>>C)r,YZ#QlJ"Ѕbh^G|ljk h9LH6o+n\GlL@!KEдxA7Ԃ0KxC߸}f|Y< ݐO!/$mವUNdk UY E@XmHx|x>2\{B\WuZ6)D[H@-pt sth]y4_*yj0GE_ws52u22qlBbGtCB|lwOnsu^Le2:|V&N3zQ!yGVSL5RR~}TA) jȇ1ۓLnw@2MJz Һ.\ե;jX71tnncHTbINXIkA:omQu+:_T8j`$y'1̠b/ ܇: n\]x[O͏O`:mb7 wZn{#hOL,^DXs8T\j(~ܬ|5E_3/ 1P<(,^KHjN݅cO$P=~DCSW1 &PテCō,ÀD75 zk{bsK*;&Uq:.8=i[Q (x=^ .؅\W_z=6D>dDŽc/]$IS4VmtEw f&n{V<_FnZe۟/keinupSaR80#еAiMНex$YԼFK nOWq<+ݘ+| rg0EJ^kH`Eg+E|Į(jn $ФRd}quON4 -1kQٙP-踋|' +){zl[a*?Y5PHVS< JA+;dAhIyyhrl}D̠IYN#dqV"s9ۨ빣,^isi:kIbBi'N~Km.QvAz9j?j- iv~uUy}Tt5l"qCR"Г]g?TO%I5,9o;s& " %qQXx,k,WMʕ-=yzq+d퓻 2+ŀc?X!![ޏ$$<̂N=o}z@N[XBQ*r߸qlq\ \# s΅,kg$.X_ie~6شUk>,2D2!MYKBsb@_v-doU?!i"a<\ʰ b)˾xUޒArD}%V0Pb<{BC#,cV oTahu9D^:#)WGZZa Sm>CZ `HyyrCuôj4գX14^UFe9x/RP#L>BVtv*kze*1T"X6h:9o8A-r{Ӆ]̔^xtp-X$;|䠊ֽmUh7 \P#8k42)@I a &s3qU2+Sm$J4 쁿q 37TQ]DΧL='6{{݇-AZ&8Qz[*=#h³y# SoAΨ[rm3\u}"sn{k1!c)n}t4(/9&2g ,Iѕ@f\#h/+&t#䋜xMqւ31).] 
pX( zW'Aϛ!U8ađ"V ɞU2g;2[n Xb;h [ 5ã\Ռe;";R"nCR\uJ"74JT$HKgHTMg_*.Bͮd/&ocg!>:9]KCBUɜl8zuHqͳ3ްejrxM, gvٱQ<&;:^RX{=Ł2,iz$z]a2 7t볼%g&#TįGnN>)>O O剑qPǐ_[8 >:`ʳ4<*Ҳkzك㉚c?̪NLhA c=*2CH+bP poSm뽑BwtjrCʣ.חQD@o/s6p>"yj~V`'*m['q~װ{sAyy媤M.oߊ@׶%xcmCa!g#n*dX,ITUCͧ{`҂,pBo8~XgTKŒ|)LF;ʍ![6$P;yܘ‰WQUY)Oiz%R^7AtoeM<&8*Uj7 KODG1|CĹ(3ڤn̘`h;/i7ZQČAH'ƾS5k7MR thWz\GgK }+-㝂^h:&+LH;\nyL , ia8&~:ŕNʽrĎ}[esx5#yXPMLȈ#%S%"}]EgY4yP\*;iO1H74r{ھ%TeUU+߸Hw/q=VX{ETVGZBa8V.Yy޷y^7UVv/MxNpl&e #oʢXT+$ht9]MLDuW\VQN%E>RA`E.Ua5Fn[è_ZR7eqZzx Dc|4R$ :醋 w7/2m;o@6SZKpTe椸^>6j r_M|+gB$k },ɚgH>wvݴK_->Ha&̛6Mqo̓=zAR ȶ4?^!m>Y um[{] Fuw-0b3R*'ߒ"PEʫ( !ga%T dV9nj α=h>DP.`(mq\w䘼U[YڧGҌeN30UnT{#c7&LYdPNۨԓރ6|O` ڨLK܅4sP#akPqvH >IH3!E4n{"A4 hu!zcWK(DDb'(bO?Uஜ5r%KtV&:Ӧ+m\n70BLQV=hRO%n32cz\ nDxC{s.7o#.隥@[sKvv7'P24m@__'grԝn;ʳF6R@73y[OIҤF̺f}]GY 1:/Q IzwX LXMvhɢzS}lT8vRId0I[X>1b2 cZӼFnWG؁$ALI=C)⛝,Ngؘ8.lr tzwSwߺIU|6eU[qY0`kU8vmq]m{Vf9}ĖcOM@ĊXztMs戜 B#kd.m7<` `Лi&HX74"ub^7=ua]3ݚ6]aųXd n;]k\ʗC.5,0PIs_6ñsJe}o{^3%ăAZU%xbXG?޴!tW#5s)"LCtqh} Mp}xf4,)2"aZͤ:Au^l=yR;ㅠ/@i"4ubqmUY<تN~?L\쁓~]>CI& ǝ̗ : c(j3p8%RjQ b~'ĶJmiaK8w#)KjNTh)7_­\6j<՘ψ.c\riq1t5dw}+1@ d}asp;uN`̽ pA/rǼ[ $BmV́1nQuCVbE7pc~ԛ)fb :GdB'J"yw MvɊ=flE*;ȳЕ^LP{)G);Uo% 罄}=zM l-Jg4ruG-^oF|(lkd\tгYa ) !=ۘQ_hq +E݇1wi֑:s7Kp(Vyҥ!WnL TJޓCxjBKH@ #tݢLm+̞(Qc},ե Ox.S=/u? e$"IN&0F5PH'J-)cyq勀ds˒[Lb YD@F͌O9 rH5Li8ǁ4tJ)R_ tidg1jUsv@k ׯmu8yХNQ Re~i؝[JEį<뙔;P>; p;k @x k0sMo0p &ovOr ؊ϟ'r؋53F#*vyVCW QR4DFڞ\/hS?I#R^.YJ-X愖#,!.&J> ,s,h4eˉ̪.09fü 6Π4qabz}͈a*wEn5/:GߟrC6%3ZQR)K9cD5?$ڹ^?R[Tm6$N@LAS:JN+ȵuƼ5+4_rmH'm|Ntw}קʾ*a %cCP~}+rCay֍hޢBG2S(,D#׊`D  s~c*(ͪ.|FY2<݊^šsfwf^L%bfG:lQ| ǒoW_2\,?@*ԃە*w&$KMV)YޫNi:ms="@Ăqrc2O<=)l/}C;OS֟j [#8g2x+K0)_\'BU}"v"Im$N/i.T;2WUY*c௽8#)755#"JL)?8QM! 4'w>-_0+A]?S#>^"nJPq7^UJDcI0knbo?[K1BRT[ JX )L p M_wWt%Νҭc5).+' K5ְ0NWב6=&ojp,X.`)3HieH$<vAE;c*Ewqn u' |d.4+>蜗{*wٽx$fqϻVXx@wt#qȐK}j2)űo ZE %.w kѷF||.Q$Ts'bKQ&Ђpr5SZ%EHkˣQ );kmXZt2}ee"T=!:RFV֐p7jm=q1wT빮^WFׯ'Tk}nÌga8&I ʭ1N[,1Ȍnk_ߕS Q2bY*) *CJ(Y ?2uR-d!>@=)4ESbLIO\/5y%޼fJrZRJ( .`+ys! `G?I }y?' 
Cݒ."#9z}$n0W4Q o,4En8ƒ s'sܖ6;'B6DPg|R>_oTSmD?HjsX^ իj@ žג J(D C9 '1ݼF}>%T!:K̝gWFN:WςTˈ>biHr88sJsl;k+!32!2;اG\|0~t9XďEE"aJ)|W5:MNgf5g2g*6mSf}Rޛ*MHy\=FVk;ZJn'7vɐ-.-Q /J2^_;np!&jdr?WZֽ?[JY%0Q-?91Vn-kح=MXa:.VA9[s]?Z:c{ Yn};=E]<זO׋ 㡐TY$aFTupdy ½ʫ 'wZ}z\qaJ30""~+Kf?ƅeMQ~ڈ6_bsHB;8'Y|jLs9_Li3PyxX*nm/`\u;€SvH]h 9܇bZs%G<_F8li,U 6XYx w&ʧˆV9c xgX W&rRGʊeKިMf|os-jjDrp%rQ;V&Н9Vt-\, &?K|UZ E+$3o#%E|iRW$PE'^-Vr%+KJ76B?jwUye̢mx,сz>{ubx8nm(Hl]hpB;L>:X`Au ~B2ݢƧZѺ`Uj֍1>LeS;˜g,clOr,`rePm]}6):7ɟ,%TOmෑ+`ϥ{I2OGqcIՙlOY"~0x Iz$z‚2l.}{5ftG'צdzj/fe_}ц-n9S,z/ԇy!/w.H"yҝÆTLD=q.y(sY;Xwxe++N gNH6&VuZ(PU"9u7Xsu⫁+\h0nt=Cn/`2uK%y_Bwk<7F X!T "啱 Δ7 ȰX iJ?ِh"TYQ=8`~a`\'TK?Es ޟ V f\:&*_x 40PSrq;|ڮw[}+Ha{m5-w diH x?D%ĶMճ~&o@Gtņ)ﹷ]a^h@|N\gi>F;ݾ( UY2VAnJKQ K_LFt8D [1_3(9,bĬnC SRRqǡϸVy 6CEaV(#(DZ**g?-,דd\'f zjd:ӉF/v62i-כ\DBߞ%&X7Sc.;sg163UefNԙ' _ehQp@Rߠ{rC""wdDf-tR ;ÃjdjcBxlLT s<@+5 J"e'2Q~HU]$sc@vJq plLsfNJŜE3*YMZiN"D h=΋;Kٞú>a%{X=%k&64U E\4{`'|g$,z+{=rm h-tF㕰*]i xiz5\{a~H6B V5kUOYq-GUXq*8 D#aQ}e#ೱWs*\^˸,7 #=dz8U:D^&u%fQtBXmPVy*X86 J_#,y.8Sƽq"+&)2t4 )DZ=6ٳbE]Z`Pg/@ vpT2A!䇥cМdLTA6$܊T5~ptK=M`^xBjYv%R]='+n4;0}(BС{v}~ I$E ȈaÃ0wGdk+=-"ۍa,=1p] ;09|"Nڄ rJApE!4v%kYԌƵp@Ctq]78nMgEqf̉ /٤j;7+nkŒ=BSYw%R3-l!wEIj+p+ L &[7k3cm E*"$lX-2Ns-d* /)jv2wPӅBqSA,hPnqNkz1qy#th %ٴ)74^I[L`Kr+l505G~eI . 
fNG}cGl)Ei' c?Uy~j%^ڧB0H*G3q{m:w;gw7MN}|}ho E27"woU!ɔ2`9"AyF7y{-7t<]8m8YI"5̊iussuݾjzk (H:%՞P^E&p6jTTOdzY ʗQzpf4@?eVvqb}f g*CgZ./:z;c1A3/M6?\;H۔a[cc%A]3"&2{k~e~sV%P]5 i7&B`kp':W,5\]T+iT|gjmCݣ=,y!q9[1e;$7%UBz E`sxYkI;ᤴxD螻!nhO!jMaYŠAɟq_e Ir]wcM݅MQ_ztHHCē6*5]o;žl%/^p+Yا̈́,0!ĄoA3I0N [G_gJ:]20(= 6Zn}7;AIī.l>h`aH-&׋I8w)\<˾VLvټ67A@NѦ~Kx\" c$-Igߐ†!~ IM5.HB]Ŗ/G6!c}^)S&(V 1;YjdՀo)|rܫ|S@試GXWC1Ju!4$В\xҏ_a?Ut#N63@vDLLZ$$,йSQްΎI1L;䪉||Hզ~+6h8418` ­*2> ԏbKkp[_YܯW(@4.AG$.u7qQoqrs7Qj?,BG;AAM͟=>֗*Bw ͼqg&X+!%2lXeLҀ_Xi;B"suq>SV:£b3W0D^bKQjWDUStCCVO7^3$ ߴE5qʩ!^:Gl~wephy/9|WG"0ɤbWWh7ql$@{{8*p:脏nFKFrI3\t@}-GV #u.JA&c/Ӄ/'{%b2u(mUָbVOF&%Qb1e]\qm>|aM舻S$foǶ n=g}JW||<{fRn'h,SwHA:<3HHkkL5yQ /RVԝF@):󼢯a D}Hd-d@Hl$}`]{7Z+5v0`ȦpNJqݐB#HDMU,Su/fMJt2_m+,H ر%{.:]q4fhc8ϩ S^>} j5ׄ_v䓲jZlh3D}lLog֝e5Sx8L> stream xڌPi Aw}p\@`%xpwww8}UT1wv? 5:Pэ `cdac@ְq#F\m0M2IS7#@ `ggcp 4(HN +k7p=||L9A6榎ES7k8=@tnn,,N +az&5@ y-(:Όam\6@GW wG PU(;6Vۀ Om,oG6657wrp6uqXR ,n^nLSG߆N6f`TIdjc;EnU~h!ttsEO4ݛ9:y:,m-,'a̪h,BzYlll| en۽3/%o18_g'g%8 %j܁*6n3#ҋwh77dg;'CxY89{_V%YuYqUƿ3W'.eb0sp/nTLmYYGK'lecחxj!7`f3a<7´ %$no/к@ T*-lVbVU hbfn-e6@'W ܚp_* xs򍣹@H&7@>t!(7Uo`AV*oElV`~AV `}A\V"\^ sQzA`."*;LKT^Xۼ@0Y&d_ |A`Z/LVd^( 33/&K9_8 5;Xڃgbnp s'{O|QB0Ifia]/a;:XZ!2~\6r6z0c-xqG4տX+ |ś.8 i.5 ZlEg91/GzpI^Ȁ;G E v ~;gwKqwK-q:UO\NCswh;/\\6i6@? vj\g6XݬA?⏩`@p=`#zy ?swn?׿\@iv\ Զ*B'8zfYP|}yZ2J,ca ݥ٣wM~Ɵ6fFzFrŪI5D]젿Crq}%U]U#P9ZAPuY$3 ։8V3'F$h<_IomTz$ЗXc4;rӾy =3^2Q1Xv8RmԢ;+=g[7z7+qAʸMQnv@VK+vK$9_π T]v=I7? DE<XG.$ Aieq0<^? 
岱齆$>;/=Qj]2 yQz6trKh$-.;MXZU*`DH1J)d7Bi/UttYA-W#Rͦ_U7vjCº,v֮JJčpwnOJ=#cV5xC߷f*'KEA_Ov"Qj{;Ǣkkݭ?x2JR*v 5${YQբ|CC5Mi_uD$J]&gs::}AWW@C2'e!O Kb5sĕ=僾AqJN6@nmڒB#T|OYlPAΥ`XA.$ղHOB//lUޒW)6S3ȧǐ8 +3R0/ғ F0BX髊Mq!ʋCrvl'[@cKNŝˀ2GYtlq#)8|Zv4D >oඹ%ja(#'MxEyM!Pmc샵]͔2 ;.Mm~MChdx)AnÂ5X;}xVtj7Zb [xGb_|ڐ(gt`( @Uql0>2{z+礈l=5D{Cq'$Ҩ- AT}?5Bs7)e9t>÷FRooK(:ˀ03ZZam հZ{[TYvZ`n` u"0{/TQ5T/=Bn?✟B@oN?~0a%B^O}4$YV1!5+a'e(*Ud5s ],D *UnGC*SfQXaj~d43<*S@hEBjN^F#˧V{:{9_ы ;œ\5<@aEL#ϧvү۹S >1' ȼ%(Ei^>*Y'xW<ސck7Urc~zǿ]eC]{9]C;; =7BӖ$x"MaAhA!GL&o2ɛL?ڢ{iq:>W3ό#% 5`"r7E29iF#A!4"x\,Lҵ׫'tb(cG& Yg)΅N׌Q#95/gyA}q Ɯ#f9IZX=9|򾬝K0 H*p8kGM' |5]{Oؠ+Q*~Vl!Jy]bND|dָ׶x ;:8J+yO:!J1#{ֻC]kcQ%ޘiXwQK9ta*B1|7wd4}˯9us=(eJb 'V1vbXρ2M LCڝ RX&T+#?&tb(^;1R\YU4 W֨Vj-G`TK:>^vc+gQgtߩ] KAj98p2.6.󁶪VROUt]vvōsbZ~!=oMCc:J%.ļiGF)r2cB62](Rvh RiT]R^ޡBWCy\lݾXk [9\_bĞ\p:z^8☉~BF 7G`~WwpLņbfły|x"FA V=6Ԛ;w,]uý>ޅ!dU>e?e_pxL4#A7_Nԁ4Jp2)q,9< aiX&Tr魽xDtr۝H(wGEoM#-oTe²pjIp0t)8/gnVe (Kn͹n9xk]Y".pU܄^Kv7QD9xFzv\iF)t6ΚMV,(Y:9ǎi*E}67l{6Ѿ$6q`_Ɣw*8mA7fķ},ħ3%D DžVu71%XfTĨ$X%<#y+ڀ XhҊBywK'<O HŸw: '*y;N^~[w-NHb FVtǫ: OFyUvO202?rprW`p%!|r$5.FU31Y"834t د- Y2I,]>^kG0F>pqK>)0Y8I(Oc>UH|vPU:5|mP_5\Xy38@m PTѕ. 僉&6)ׅ [\#BI$R/)c.c쵐!<7:U^=bT6g%@XN)=RqHo9!jR[/.k!.C( wpx.LlUY}nZ*Fݼ:ΈaN61lDi*?rb@~:tQ"f6Um}nWmr4P/Lq3BP m]λ᳷<Ѫ (fi%EC I*OEK SL2$5 ߵw҆Q;APΛL:ΛpCy4g=]$߬yȾWy9Mcag#}e;hu; Tw$KsNFvEW7mhLnY[62gYa]te@])'Q8!a2zԱ3m>F+ԁJk uBe:+"8rq7x㜡$62 ,GBXS/=h{Xh;$gxrPt/$KvkʥݹSojgX?L ?D pd/u0wcܲ,⳦"+4=V51]ZJ.׳w fCr,Vi)i-cqe.cD?6z΂^-z/憉\ ~Ѵ8zmOA}O>0KCTjӪ?N9D +KoWoYf[ nb6Z?k6O>kؚ#=t[owv8; l{ i~# E wj!b&hkؽI6V~j~p$ɯ7_;yk6G4_r{"󨄟XQ ۯɜhC˛Lj ;Ls4z 4_lky8O& UgQ Hk)˰;\0qf{a}-4S+vM!-MQAj(Y132N"[7&).Rֱ,3PFc̤aLzJ opG% /ʯu`>W'Ϳ ftWMbSڣC53huj{߅JkNa"%n/\ ?kC O1ȲP"sjoHYB],15zf^ڝ'}ىv-ekx^伴fW'TqHҼ~6zg>~a_&kԱ#-' :8cNԑ9?MW[?ȥ,T㑖>ܢ ڲm"qʞ<: jF>~LCZ$ٮO <7/bxERxԴ ~^ ]A9ՕVmۼadVݍ7OW@ f7Cz9S'O0/$ f!+ʄ5"c0`>&E0$L85T&V,M҅׵p_=k1̨ o;~9v?sT?Oӆ)}2 O9UolA[TK9k9JasgGuQ7<R1^? 
z_hVpVSR/Vv}V)fY-%%ߞN3"*ߖG.[v[ kbRglnGo ˈS!H")&KQJ%Ռvatsk-9 )Q/é긶44f'AXם atܭq[yOEYJZV<%m|&$CX8vrp }|.UƺmC.?Hz=>ҫfliH8.ٖJkzծĤ yMһUT>ǑGz͂@~16hhcQ\5Zx%{_1qLW9^k @Cz&`|2d~_ w3/ip$>b $r;ZC-uO~YɗaN%.=wʐES WA= DžbYsݖ%Jc4/ q/fSF>MLu3j wh=:¤e_HXȭ+2.MwIt #n)_%:ot rP]A׿h E}¤9藬3괼=9=L p#2cj=]O^uOS!Z" :Ӗ=PVޑ.Z8`VUwBhܛ.8YТ?Y􈄖!>xCΡ?y.Cg~&p>Ĥ +GVi;oPqG3 EC,sSf]=I1`**e32x?g𱡵#GB)V'ί/8̌yFIv3| A#_u!ͽ۠/ldUy3-wfפ/5oIpT\ZչGkH?*e_0SV8T=> QDx߇=*l܃Тiٿ&<_~Z"VR(nmanqQtgHu@xr~SIf<JW|jiD >V>wx5{Cxfܷ\cI`\/T)4P hO4|ECF31m~NMf=h1aF}o %G4QR QlBN4C)P5 !89O5$q1ynWk9yzi6AmXFEWʫ!ٰԸ_Uxhd}&cbaX#ݐǖ`5>%B ˌud>EZ>8ȤKnKsl.=7;HfVRL$=2P=OqD|(PsNpsIvK^48e8D q9ru;"?愯ˇcXgz#|P' {'tд3>AG;CaVxgKS7( vkchj//,~#WV귇嬡V;{Đ7HfC;pB$>aF%e6Rx& C_>{"u]@Bn3j7^ duP*gB-秧p-]vH Zfq7ajfЏ-jѵQ}| gl%$2=I%N]SAlH}g2R{DX)ly?)`cȎ(nH`yn؅" n?fL8u6O Y^-dBMcK&ZqZ@|ZN,ћ=I(ot 0 8E–_g8rFMҞZ%[1||o׉sï]9PH)lTEY=\fUyw!{FU@s6 cɶ~-ۨV JmS3[9Y۪:$_huNw$~<\U] "'v Ziw.|ʎW2PT Gwd払^]Xôiٞ(ry֯T2zǜCƁpljTs~*N(bV Ĕ E8JgWSghzt.dtHp}8T !SV%sօ)}_뉝_@S:DGnC5D.,-5S4#.uE,+LI3W:S o^-WCv&XXs'ۛ IB"%9*q3\Vj]dU.CC9HU뵯c +1"7IoC\h-+bM.%#ʙ5k--wP}Fad|"lqOBeB4!\u}Q6ә2 SzCRkqR(Lz$Lk=UD\+SbydfӅLRb@xdE̱%se:ыQŻ#-숞SkǕE ^a' צ/oҧ TpusUٌG |\ZT#7:/=-x3-1?YQŹAbF,]T^uрR37Tdˤ'M*gL8fS`C/FV.m2[}}FSzۥu .s4E)[3d]2{FySр$CU }؏Ç;M+c"r Z \06 U E#% oF~xKܜ.|Y\Yܟ,=-̓yBMZ:-)iB64b&GƓgּV>{3W^d2Ns57LϢIky&!SZ=HqTID"fP4 zgFI"Og'pEh朓ӑ_O?BL_R% 5:\vx)s)ex[O2ukUJ.r5J=\ DB`r"Y]vd #"8%Kh6rU>V gs`D|ݒT&oblԇ:y]ѰǻV9xnZT`w`!R/breGh[7e%:<XP g)jCG YkrʞGZ}$㡨?b9."=X$f&}5ϽG^lV ,d'sڌ;]TXE }twB[o vRܰ@sU2KkG%Qm2جVPFYb?j Q; ȑ*(dtTG3|)ߤ%p"~%M<2f">xanTjO*ݒ]C'JFGxHxQZKw1PV?>o[B\X4R{ , =\P& cI=jJ\hz |tV'KI\Tcl5Mmԟn} (1e{åcbmupXIVCE'9|k >Jy0B&iW`SΘF[miЂn5'-SYAgzxFbW󶤭jW[b`"QTߦFЩmm,mbc29J5?9ҕsR\eSmcb)FZ7rtөJîcQUìo_>D BӢo-@D_A>/p^yqG>RwwvwW~sg4&8~\:扟ϊz.QTv Nvjíp0qls `inbSw*b Ƭ/.!1z*,xs^|AWBvDžحVǿұYt.IMRqDr$Mk\"~4JfU/jwuh?o{C*+kF*V:ʼ][ |5՟c蜰(AVqSS(lV(!qgԁHMƐ+m+ئ"~d$kF o=ZUfZ [L%UÇNT\W f;’TNZqp)]2vOAK7p8̦)v!N؉|ـVy;OņYrG<pFQG+ - }W3{@DU>hǦ6fkRQtꮨƙD!i%&>6yZ6 +6Ѹs'l h5 >uX ]rW{9L: T% ]Rhdv  XN+*0ɼQVY1?"ïh72iXkrYM0ShW0|MOupGe ?a 
vxn]sYj᱋u֟*Z@7jm&iWu^3 t8i评EjtQ Slf8->h@gcSA$j/=X[`&NOVly(UlW$;.D4 ["p<LN4c{6 TCtѩaX1)1'^`ڸ[[!=~Hwp#aE~P_.?p(gzC1M(g&ߐ YWpT&ku)ceDL4?DRM9:j[L1rv*<РtvlAd4ZytRMȽLKOvc p ;zl6{=e36c7A-?&mYfD4o"EXn9 {/@ksn27IL=C`N:WZj.eB(seR1`Zu4hȺ`Llީ1Dc9:֑S7Qm Tg;ANzsO}CC"|tռ2W+CkܨO>)_nLjW(B+dma+PMBYt{_]"i~wmxF7&? JPFtЭV/J(:9mRSI^WFزb*ƣҰ{u\JO:Sk\n7$X=юs1)Ġ~7rZрΡk-]LOg)QZ8ZbSMLQ=)[;UJ@-ڪ&y(/.ư׏!'jGx`azOS{?G0XO_ϸLi_Ardc{d {nn̬C񥔜8u71;pcC2Y U(SK#7e{p2xb@fB|s9ji,tŨDO ~}vyi>o|Ӈ'w_ *rayJꆸBO*oWGNDztGr_kRED4Lq+ Ke2hO"Cj:Xi~9.@mD-[:K9\.3R8F#}k gq7:˲~ot|yvDM_nl7JHέ!416vl@K%r;fSMn͝l'=(֓ *«iERIƻ죥ZIL4XWƍN0>+L 2RgUkoViᲫIL۫^v׎e.'T@iwO NA~}ƫ74Vu&E'DNi[wR?E$z P"lzϡ5"! bM_%  pE?LNޓZ] !3L]'B/KC8o'Q %QG-u_ʬħÏ9,7JxBM\ *9`JPE7=nח9=(mµ*2ۆRK!pHd58:"FY g<;=_ޯѐ}Tctv3J3u[bbZ>ȍ/ɦɾ} ‚OGgiy&s0~s1xc.Vۡ2*_;S&sk`tzPD1U2ߧB?)$ ,Z> " /fyҗ=w۳#wM@rVoOhHse$tr*q|w,jbDV)ޒ6< t֢[ڷ~01Gkݼ,>G /~'[TA[=G& Ip\jj$K{K.Y`P uҌpH:EIj([& rp/fWVH&1ujziGBn읈Lu7Ԣi٦%QBiŻsg15 vs43u[*1){~5y)n9IcDj2 ~Jǯ'=MuJuy #jd-*an.ӷBTG&~CX!p v3QHϮ!XZ^~\ExM[]vP<+=]n/}%P't!<uѹgf)s9>]̭AXu*oyOCc`WNtȍ$6KGt v JVWșrzo|Q=ć޾5Tk[m AwRL]8A˻%[ݵcm搘"T 5ҀFz!&t6;n.)=4 ZpDZkIU=JQm8*!Njd#w=~|Bު#w;α]!9]?o"ɬ6`ѧ >9#wr[ޑVSqI_HhXZ/[ދq{ǂʷZyFfm4Uh>*֣ERPnw.Մ07|nw(S|bZ^S((5ϋ|_Pp}@ptx9+ >|5@DL0eo,%Jͻ0 R˲.i1+ :̘^>b8FA! Zi7!p6]$#ưJ=:IMEN0 ׶>A&#\);׏ ]NGJJ}DtyKaUivy1T׫_ Ȧ6pCy"tFgoSn:ٷWi*uq Զ`XgiC AQ~|(om +Ix8\HV:}8A5R"G~|^`r>`_ټ  `x6ʩ@r/#I@e~30};ʚOCfVk"Iroԝ=nG)-/ӦX8%.X"HBe0k|wwxC 3^G)eoZ؝k4\^MѾXcb|?vz Ȝ ݳa-O2d XY*UQ&7] xD#خD (2C+}(,8( cKъ՚22+pg]kH6ǀ˙I5t}YN2|-Z9//ZWDcZ0$ ̆`+fo#v`L:P؛l"\擎{÷z3P˾IC2oZ@9HEɹ`і EEZ -sOM}eIp[J 5 >%W$Lo/YO!l@īᰯwx^M_=K5ل$$ EKDK3Rseng`<5oFWwiGvXuQEG B ?= g`rno+4:CC)՘EaN|#ގY iF@*c"G2կNnf 0 5 dڧٛh? 
G] \D663>MA=y<fJ \ q(%jhd,z#XT1d(*P>h{Z H+ps^(Z/mB.(^ȧ* ІF*]8bbFUHdT% w93W^`Cu%2Z;{V5'r:]II A|3u,1H*gph!,R}j56"ƨnRQGg4M]=€]]2j.Pa<Ӫj':;_"?( Z;>VdzVˡSfCkUhGv9gFhw7v!P8&w$%8㵒*iRմ٬09 ߰H`[#|,bSAJ8h-浆+_g'VD??+ ;aYh#wXoVƍW{Lgگ(8dMHަd{A3pZqƐ}<޼~@+5.<^%g"0n@9p?X GġsKe(d:Pk\QAr2IҒ 55Y n}T &w~,AcOl)$7i~]c>05WejDۛOほK}__K ur?&\'lH] m=l`FD6` :55_R鲔1XhkavJ u "n%@ӔU*kݭ*(|cB %6?Z+Ԉ`XKZ8gz,aO0/B/^V6-v,0p: ;zۗ5L39ta }vvuC u- SY/-T 6ݨ,@[EZN_@#Q㤘H-|裪v MQE܌:\dt%dyef\-en\ܼ3+<~0LӅݏ@ꌋЦ툖c{wʼ7< :؀n:ek*%w p Vt0pqh>8xVH2[yKӛV9y{_TUV]Go4cI!ش!+l ]7p~x0q45а/;v8m([i,gCܽ;7L'VX_r%{=7+al!:?%ڳ_Gr`ԡe^pZ̑ A{:1z;u.j|%`q"`Ž7lDr| Ołژ0j%j^n 3F1}+1Ȉ@7™ . fÊja]Ao<)#1qOYHBP"S!G%O N# ;!S}Sp] ute^ y+/UΞYT?<)Br@X^]e TT2/S ڧ>VCJfsbɨS,oMvT3ڛQ<=dqZx*q3F7~.;/Vhlfal_%v4e Bܼ"(QwP죘 c85s!ɷwmdPsm V3 K3t wHma&^ Fo&z!mŸyV%u7֡8 gDܸbqodWliz|^xW<(H-7e&,y%2OFqUAHC9Bmk\eP-pÃZ*X8KNjb\Iu8j3K'^X!J-Us`]Y f PZme?)g$߇B%%sU=5]'F9ЋF@ZQIGGLF.N$U:d`"y +sኋWlA.h>!!cyDEnH y|P._뉶ǰ g|lYٮ^fԷ#z !:Iy0b7r*^XV^+`D]+]P£!Q%S4u dzNT^wK] :NUBתOS`SLFuq9Tʒ% k&r>k{&Pt2yP`Q3bý1ze Kn+P~!EPw:VKzͱ !T0FjZ:nW#$Uz䴶7*_~s^X|mp`.Yc4@3.wIzo-yHZ1m#Oe()RǷvZc`[vVmAVtfDaoټ6p֢ac|z5}&P@}NO)ȗve:bS8EGȕe=g^0,R9`ÖŰ;樯SѰ@lFn|X/5D }q;jQ6c]-Úvmqu(LV 5H"JC{!~q PbJ}`0w(nR2R V`CAOI9w+8}׬k5JgkeU,YSߣh'7_>SD$ZT!nt/" ַOFXx8Eu9ycꅋk8战\CU{bL#P*+Ÿ:Օ-Z{At`Œa ֐1D^?l+4Ϥ"zٗbAHhr}6SԊfX-aP%6'伜Dgu9qke8lJHAS%o#=X'>HQn-@|?(gi"ϫZԾYƮ`|LwU~ Nqgl(:{s`aA}<ԧя}<$*~Se~vS-#ł\ ?AٮW˾ft#J'ESR5OO^&ņY05zeji5$E2՚ .3 w͞uxsoN*evu_򼭀w9&xI FmTJ4pf87: -`ԙ лiV]{M+X?^J?3E3j/0<ws?z6ծ!}̂A no,ſ/'p!61#a-éACg[|J$=[}n@;/lq:U#3d/:|Φ5SؼRҷ^kӚhy"pD&iGꪽ ϲ()(%o@XDQA6WN9La Zmթ)6v`\ ǜf0OXBrh@5%CB 6<3W+|oM0!4 GpQxrYw61XҢx+9v )"y` }\5¬zM-KX?ۚhw{224ob~Ѯu=Vɝ ]aVl>;+dcÓ:tx>EH&ˌ"i.d$)'J3:wiFl+Wbz{o;c151^Moh  ̮E@Y)U9]ǮfbB_K]x:XIv,Jy:TNT2ɄØhKN\ilA@ء%5> EW-}pw]{Vi0uOM8w9=܈mbT3zEYե[%qOym4q&hľd~ 2>:O~$?^'pu0$gEW͌3^$5׻D=K ST(262ṈU:SqUG$U$~SqEU 炕[i]|w0`/kN!$=hȊ#sXऺؕmI$LIC V@ βс!S a^t'o17&M8e[~̸9x=^1:6t`@ڈO J;;yIQ uЊh~j) }8EaY;62n֣ %2خx?.o]6kt[Nv(8&h({BZhgg~sECϺ.NE먏YLXuI*=Lkl)XIC[w Z ~Ct8!="Hpd1PJ*x0[v;`^bnfFdq.2'J&<#EEdYG ^$ ԕ!~Ԁ5L 
^$xT!~)ƘP*;]Bgݱg!疃kp+PDXLh[щ$g~&Ў`:d*D#Ǻp:sX&ULZ/Ii1,L'a_wdc%fh~+ OTnaT$@ 38*h"Maf\ho|CmݱyLBӊv(BUkXnW1!7/>ϬM)fbyK#@|mBg(hjoR}7/\OjB2HfTJeʗ՛D.9Ev23նEJ'|6zǨ~4/O!fWuh Rߡ=nrNdJEcIr-cޝB?۸DG%ɢ}pQOUQ?/ѴA_Shcoݤ8fov׿I9o@P,*3u1ezUԿMZ@[帪/j&bJ&Td:FP&T";[*ET64> Yt'@>:CK]AY@j1\L2=lP*%@u}03mk + m5mTOR^ՙ309Կ8DZIއނCU^} G B C/Xr̹Rm|Ic$mw(o])}RV#W (rQ3^M 0F_L|ŢYIQw(쓼?. FTmtoj*cu{raD:YOUDICdYx'@aկh'- n%1*TU,_0$|$U)0Y&8 d qo2UΟ+laY"Kb?#w%BV1YM\{뻅h7ԒO 'Ǜ:voXToZ,̧١("-(k-(8k%鉾Yp^;K%J()2kSkd=@4հ)mij5][v:C̭ÏmAF>,Z$Rѐ/tgp@NۥkBub5raI]|m$Cwj m׬7h!hhmҦפ(:A .ğ?m6l*3@m7mlQ-0S×Ƣ oN+jۃkp_M1TNgB13猸b0G drzrLBv=yԨ",4%@ t/ j\8tpY~&ݖiINՓRKR{=g|UR hTe|`4;rI"66A'6Y cyȿ ?͗Z"x Ax]jWNr,sD EK֢u9f0HWw7$`It jUnbu=wU%ݪaO'u# J1"S &\\!Rr.I[_~;R/9ύS^[]ϵ՝⭈wYx$#'HAގ'-9Z]xƘ&}gztkjb!,>I'VO`cM7a<΍ ^6}.B``䵞xEB[(bX+!8-C1.9հ)s8SwweHw9)Ttxv`endstream endobj 359 0 obj << /Filter /FlateDecode /Length1 1511 /Length2 7479 /Length3 0 /Length 8495 >> stream xڍT6LH(0H %% 030 )-]4HHt"4ߨsoZ3~v;5@z-]Nik%DCrpD: %HGhAB0" `$ #Qfp@  Qj PaW, Cnb v V`@8n;tVP_!XHgQnn.+a+":Wb U.@]gum`V+ f APwtc?Z;_޿AaVVp'g0 @!M5.'Y2;Q`w0l28 :W+ uU!0&ìeNNҕW~rP u/cu=`>6PͯݜaP7_(@ !a\[ F wؠJAm W;DA|[o` B,!P?Q0揌< 0}2CqspTuJF 8y@>(Z`_YU"Eu? 5}Xpc!n Yxiݿ_(9:ֲR`'_z_ݐ(Q_SCȟuUXCݜWv@fw POie*p_ A_ jP+UJyv ؋5b$A57\08@#~$p@Q$Ep7?o_ /u7+fyH;|@(OLa7WVnjSՏȿ_bE0? 
m;~Qbn3hw$Ma ^EJ ["rdgyk۟W :o (F ?{{kقީuq&'=U_!|f]{ZPl3V?4d g=MɀcI2*f}^">MS|rJqd^9G|1pTL9:fU)ѩ>?`IeSP+f"VQb~ÒųՍ񆏜[D|%H1Eu1CwE)?iJi]v\s?4F0!sONbm3j7 X}c8I'ݟz|=ب GVfQf%&lO ܽz%mjf5 GW >K?Kz+Æт2nuCQBw&T("x;l+e_AANZb!ORY_'FϧIK\h0Ns$+:xj|-Z/>eTBf۔ R]gzO.\Wsuhxi^)W(w#k= ̻L P4&J4=2*oTbVIn H-`aEZ6t7CRZx:=7$SL7hf_t>7?C4?+ Uy&7b1'TAw2lDWj9HzuJ*}r0"!8xzhPG[8Xw5Ғ» X|elz/_z'kkg˭f:^r{vjӬ[ނHo[457SoPeOa7yW#>6ǐi;A: ,ew?B^og \*EE{|}{3[VM[-@Og"9$x;jKXȔ8b$'Žcq6$c05!p7TzTވ|[_#=v-/H)@ݗRe{k؏` l%KXꨕq֎&UvId^ORGr,߇0&E]!Ta:υ>\[(-we5 λ2?&_@?$$_-ULNNLǓ&0l҃6oHqJi  -k2 %h+n.evkHjZ)`mz1;>mGco`K-@ [+KLuO63]4.=Yk=5'mh,y38\}C)sj+ifʯWMup6܉r\q]pFn|o1} VVe@Gg+ YmAaͷi߈l3ܙVkXv~J`P|(m~2bLMAjg o@JAPj:qtZڝT~#ծ*1VōS^Z!$$ܽ|_RhGccSpwl"a3UP löpPg)$OX]riS8k~欵}8.-ʅ?={gmpH=@pgΑ!3ځ4z{4II9!@ˋܣd{IGOTEs` FzS:5Aw mYLc;X|[2|3^( SNkqpy{l6~͜#(tK+H`@ut{5\W%?XQBE;WOWf>46z7uxZ Ao}W~oZУH*>PwH8v|#cМ -&3%_:2Ώ,Ed^sP* t,%F>ۃqn%KamT ƿLrǥVkQ\v~:II-1Il$2eb\b3V_f"cN>jK.6A.gO&fʳJ/ѩ1EfS.3lm@2ۑl>F%|6#V:ľ~%T:5NPaVO^m n>vA`+ $m|Nx2RQW-L4K*~+I6A/DsG=X!|'cۮMY~x80C\*1Yb3:0_$FM'܊[eRpȺazH)#or )=!QZ&hܱuyMUk M9DCaJlk3`u{M+?%Yal_Yr9ldެ 2/~bW@)z 8F>oяKhN;nL(s-ZeQO*/Eo[A5~Ǯ 77%DyK6>PԷ\O+~;!/t/17l%"lSF Z,J ;o/PhniV'$S.k гQ?}D}CQgqEWWE҄@Sbh`A}]vCZ]j-'>-'$ɺc%z;y^'tęG.=8m (=ݯP#cYg>蓓%.o:#-du女fI]'hf)rtD;׽+׏Y3Nq*_z>5`M,bt[]5˙~t{BĂV;|!h?pq\F8UIMl-Wt=]:ks_9tM̲Sslͼ$>g&Ubz_+C}1:NU~/GD˾,:و,/ A#"-I.a\ *gUSPZOphe#k%VR=噆}Q\674bvvKByŪe:.aϤK\eq2'84€MIUUtXװ{`22>}B47'D|/} "bq 2zlnH{plptM ՘㦞I*y L*sY~QC@E 9/5B{u"[;!7~!O'BwTx׊.i`zqyR͡oāԃČqDɒ+<[ߺ#ͿTTZ7#))1mm:y"^o@YcH8T+xfͭ0άpl >p6/] F_zFVȐFg,Q1P5ť][rW^K,El]?%;+WDܥjlyӣӛYm#X6o:)Î_rbGBYGK:L*iʵuD-Z3 W%Xr|V^=pM,Rcl$ h@qg& %&,_7Xj IїtulFbSw›U:Jy10f^w7M#;N)y&\'p132jT"}.Q'UF7xn)SM%b+t(ĊZ3Ah˭1ǭڳդv?sMi uЅX=֏ A[sx |f9)ERo#Оt ZUM-5X>κ D%m,Fedz4Q/nYQ HDOӎn?# l:݅;t }V)l hBp,$>xYS YWCz-HHz4j0]'ϟ;-]Bd+bFiٸYb'#sI:f^ڏw)Lm}YIk$Yb>s+Hݕn6t˞q*/tYN7W+$w7!4M*AaW2_2*_@˾^8:5B0rđv K'~)W*,W K# "3啕ؤ%S9C/,+qmkv,]dpʑJ;d:(ntV3:)+k\aS_sEy@zwnN|m0QKp 犂9ka S }sYgϊT=[1rєsU 9Ơzw1+JD·au$@]^g mD8|*2&m3Aj{ў\a֜\IKֲZ#maJk;UϼT^d7RYZi!|e => WV yW\ol=}Sabzщ;i/8z 
$o;j WyPsN%Y~;*؜LlO(&رyEϯD|2ajc̛7DY%YT`#@nLc;W4²[%?V{mߩ/y3pn7z:xtg}ph&}`POyP >ˈ/yR'ʶz~FsP--5i}uSsG ǷW Vo3 >`E!JS WY} B?L)O '2M`պOiBT=1uEOG> _%J0r=?DfWe2 =\}A:Y/gQOFb_/unJ-Mdyk\\пstJ>"~F|jOHLQ7q׺anu؁ZDV ^ TU $Þg_px`Evn5g/b~5 EZ! m$;2^ؖ[O rЇ8~)qsf+hdQnCԀ'cB?U:İ[ ."AsB3߁ rTSw>Ȳ?lI7x yD.H[m2]9wfo ۻ[4nG&9O _P1t~`-J#6?raendstream endobj 360 0 obj << /Filter /FlateDecode /Length1 1651 /Length2 8628 /Length3 0 /Length 9714 >> stream xڍT6LI CJ R 0 1t %JJw HKt|9soZ3~]Ŧg(( (AXm E`apH^0( )AQh5m$!bqI0( KG% TB@ $ RDzxQ(9yy q?́0/=ԆaP7!C3 !  Aݽ^N2@?8hy@a&9ý (? 07c յ0ğZ* D򷻿;#0#=8 wuUP( [DC}p7Zġ@y} ]_y{=PBp~A_2ACS{ѷ"ϳ#1SWK `(( a@3s$7?$tD ;? o/ 7o (  ѝу>Ygp Gt (oJA  0P}=(,ت#@?E<-/߾t艅ypK(?Lˀo>*>nn< uţ}m$zj s]apeQP#D G;9*ƿ !  UG=P0;2{EŀP//hb$ AB$ mDtDz~wLo/.AD7:"@#뿜UŁ {@etJ s'=_׿O4= {y|Fo21 _@!q y }Xto#`0{)'.UO+ׇǹM_ }j$K-u&I6s*7z/1Y*&`l 0=Ji|u3FgcW6 O$zy~ݪ=%Sb%_,NpڽcE 2QNSdܲj$BvEY, ? o$NIoAnj}J18A5BOA\;j T<~l6ۆ6jtLۍGeXGdAS%Umo?fPńip1HKMa=z6*3êzBV "f'V!rga˅?bgwd4D)ez 4Kk2X[7ii1G:cmybr $oȆe ꏶgaOFȞ OF'f%M>clgf. 
wJۈM݋P:I0 &LM}dnlMOd\Ve >o'_>oǫtA[Uztb5`pͶ=CԘEAYl{}^Q4nc 6~%ig >; t.~eA-ӎg6p1^o?Lߝ]7ʸ-T\zQ-M3jNk3W|zz~WTZ˴gV/l1-^夌bgIێND_v2ti쓱{M tJ՞0ώN7LՂ9E{@ӹ4y|"岗 ~˛9z5VQpAȥKֽ SP>Ǎ0Ohns}̉đ(axp#^ xtȜu^ܔbR&֚nجÁ& ?޲+OJvѳ]ݪBe6Қ;,Nh]=EUi\%DobK1oL8vQR}ǯ]R#.hizU*ДT>2w׹G躸CG&־ +n˞XTƈna}\8B0,"He.A6ug%JBi1ut-1V{.7MM&.@mǧA{{/gLv}hƛ7ÚQiJ-i-ZYEUP1x#3ɔz]";r3BB t7Bq{GÞ%D3{sJ;V(&yZ][>%YU$OTu v,^ӧˣ GwFg+JL)pMsFDr<3MZ7k=41:H*@|n h0H8S2:EqzTU{I?tB҈ډG'3"h7.۫EҬKQI 31K,AgզWE7[Y1;U.+}SAC)R7KRF[Hu fa/ȏcd>?Y(㽁w{\a.WtW$zYEV|Kr0fxrTnt…~~sA4yɦ_ ꧧr0I7{51 jM51s[5 Nr 94=$ڹW``,;j8cfj4+>i`L .a<[L:AXϗud!r.-b0m(h{~3|$qZ*em%~릋cvxQ(|l[4xg&1#tp%‹99Nla;jVS֓*CWֻ VŅ9"?ES*4VfME~$&3$:kg,V-"g)3jMgвo3Re;F Q*iS̋tÞTMΆջ2 n |5( pCkUt + 0:TQ4B8Ӵ6׺檶u߫"(i*%Q˓HS-ݏ;>iW+&HReHc$Kd t>`[Kx[|Ryd=!,!1ϤY|?9OM_El=Ih#&9۱eԶW_Z{Ko߳oHB*-ÏgǨhA$śó[5]Z%ObfWe*f_v pP֘.l~j7JI ~I Ǟǿlx{1Sv:2RgO \^ǰ'k p(10uS_6TnTj]ɫ6tvIkcpTmofϐXfu"~](yM ZE_ @5 } n@J'+7tFiV^W@Mz뚂(}\h" <7(b 'Q{rզ?]cՖRU9w4uN$h?&fW="Ҝ˻j:CJ2`jq(ǞI,j9UA[HV[́VYf^lQթwoZ%zwi̢FͧVCEM:~u7@#-G)?XMen9mByߨ@@n~7N=RS A%qop$'N)Y$ ~ 9]{J! 
cɑ,TI6˽vQV̆tC/*yj$1Ŭv+bM/Q$rFT$mrqtOǔߙhwO`st}baQjoCkB[ nܷÇ{K"q"vB3v?/ ѩ:-1+ XV"Fapr{[<ت=s9upy㣼l-kO/fF]}}cS;NMCc[(18AI/QMH|7 f"6$A8Zsu1.GQv zD D҆+U7&](Q1 @: e25Q_2{@VQЮ.6(.l\B_k3TOVU4^HϤ* = Fm7=.ɓ~g_p VeQ i!DUqr]p)8 [ #?E?mțYʽĭHʢGo'Iʘ'lw:fj\*gհQ~{tѸgߞzQ^qe\Um`<((=̥gtpVesjFz|Ƒ㉣ TTIXa)&(%*ci/N"N plcM .&aΰ4Zk|t/vЪ|/-87r5bFƛ@u4ۡE GH\UquܠVd /Kkc] 7b\l_ l Sٕ4;opT1]6"[BAu" Ġ%HAk3:MAq|SW9w+ Ư.g vN˞JGsto(y|H7b2q5F`Ov݃r)9 ~=cI K!I0?'5 Z߾md/ CsܺᲈgC.}gzM o%St%l[Ul Z >|ʉAVتbwp즭/8{>[QqїTL; F&/phՊgi=4B U"EOoyDٝ85|Z z6Lq(7LEwb'pݭr>~RVx||x-稓D+lSAAWދpe|۫(}`1U?+[AĠa)"wްG|JͮWۊ:P pŠ)C"bQnEXcMb|<掍7 jgX&*h ) k{[G_f rR]{=\7p Hb[MM.r^p w@9_rnߗʋ\,ɯ|ڊRv~!7uggG_kVG IcdpEQ҃|@ !IዓC[π*e~*e-uS/f(T^efteeZtFj?a_ * x4g-Մ]OyPcLufVi3/93ILbvp&G!C:kU瓤PIaͣ@zhdo9z?3ưa|l ~*Q ɪ<8}[(Uf]g CqcK}'_δ=?KFj[4+PĀֺf=Iu=?6WCk<(Sq>d>\/ 3m Yt~5($LUGƾfjgJ6a[7}ßLrսP2ҋN)Һt`u9|)Wz:)]; Ma|Y Ũ\|X[-dhO=xǿZ.Gh4pISaAhH00OE{'M^ NImOm>`,qq`܏Sgy *k?Dԭ]Ek-5>$ھ2>͒; \mT IXdį' E?/f1c{a>AqIV_2f293}N[k0m18AI3=s b'UKDvi״{iNs.i7zF~2PPq?Qw׫= w+yΛ,g&uUJGl*=A|sw' qUB0֬~\g׏)RIHD-ŋqT~%}OvDw0};_E vbЭ@Fk[^bݚ=g>e'X4V o$mpu^jAP%C^ f݌f~:+ntL|ev (3:?Qp 8DX+f`Ksk1qGҐ|nۓA]w ƺ[ Ub9e.C3$A|/ XJk*C~MUOy<d53>׌̋~N|5I[/x 0ƇFspޚ8%nG jKcMG6sQJ3@5oIO"0nf~8PT/=S`01/M;_UO3q+xRR.gL2W&gR},+sxDaAE Y&oB-9ɚشG[¦ G*.nυ4sH}!ݡriejv.L27 nmK]_lmjijDY`IYd_Z꽃f(~;;JM~Ll>BJXvR4B*ֹw U?\<`if}",A"0Q7$GA֚Acbu!qTwna2`&jQQe'&ߞw.a9 s8{B"lUJe"ݸ*+Be.T@WoGz.S]"O/l^̓ k]Sh^}g2SX 6z=ħf"<܂z|w DcMGξ.2 5[>-$Lsgwh%ۣλQڟ3+0(#+ֆB=h%;;xd#η;R7Sh <\o< |OsKR$1?O~`V˭N~fG?I;KqjdG2:}gMEGr/9@Ol-<4yӺzNr:5۴֞>\-] hL axkKD\NJȴendstream endobj 361 0 obj << /Filter /FlateDecode /Length1 1766 /Length2 11689 /Length3 0 /Length 12804 >> stream xڍP n% =`%Hp-|T<{^{w阮XAN(ncg*01YU@Հv +0"@}7Q r0ع8̌\!ZrDAFYzh@.beb 21G!!K-P շ7Ze4Է([.ޚɉ^Ҏք7(퀶@#r#TLAv9m7{[6޲%e@_d7Dp#b}CC+Kk} l0Y2}D} ; }7Ÿąo ggh Y0o,6Od 4|wlv2a`͠ 8%E漙l\ lhGkN?o<ܬo2 c#`opdh0D3oo r|e|k?&>iuG *EUou [9ؙtl&&fۃQ]ǿJ\O){@Ks#vs.#kߊ,,SE[,\fuZRՁ,`o 6yh:.zV ;q3HdohWeWc,@` @|o3fhvؽu.oZ1зwAx;7pczJ# `[ٿ-I["ql?L"fA"NF?_q XYC5ѿ |Y7vi/F1|Ko п[qb-o|+_-["Av[^z/O?k`kv9o : f 
yͪ[*>:2"^SsS὿bj _G?b.x7f4YsO ^i66آuG{8q`ޕKKld}֠t-8ECks3r s1=hX,.'' PJkpt$ݥI }b3fb-h#!u!~';8`|S 1߇nXY'Ur{R4UDD :f yivmٞ(Y[> FK%p7M+fAWP,gM8Z~L}&JF'GdK @?f𡐿3ͧh~;jYEݸZ+8 t ^;,༐Au9ȉruh,dT߉;lUg\ꪵ!ѭ|N|+i"! rFFlխ`ec+ 2hZ_2SzD=,|qtS7qw;]R69 e wdqpHRRi`9 Q,6blFf5fU6L}<ç$(,5JJЗ~5>r7P}81/*waYfVT.Jza¸\WNJ_ ]zC $.8_Enޏha}]-'O@LfV2=k+hQ|w>yz\!aUaQ }ߴ1$b jڂ#Ն퓶j鉛*(X#R((+DĪ۪n&'+R<:^.h<*Ù9>n1'g^z"g^##lˡDQNYޟ s' h%N!hmgTRfbSOr ėNuiڢa-pHl/b$ADCjXsy VIQEYW'։y}_ ϒy &V.96'7է-0,j.)]:ە;_-X࣍Jwa1&DzK3$?/#d/>j-h gY`۱^? ~ [S>gv bPo6#)I z_j l=*d"ŀ!G-ϐ99bl& T5ƅaFS W)Cd~苗%c_vMKzL0G֖s5tFr4;viG;PQGGs/BΔV0gM *ճJuj1ہב /.-ki]Qcw.u'ڞ>OxUŸr&=N|{c:)F$GSGy۳ᲺD٢Cro^Kqu"44s괐bzQF2G7q~[US|h~ʳ8HRhn|&:3&φ?ND#5oh.zxa&&D֟R~!) d+A䇢, ~1M(ں{/DLg"O7L~φ#g֍1|!2&B#Vto 2,̏M5ca3I[vsT/KqL<*}(1?GzDф(&[5S'b緽wdt>"FZ)i\fSK+TqHO5PW3*t,m80\* j# 6 | p~aSgs6zm|0p"XZWqk2/E? i]- "&ǣXRT<\ecWƪ 4\z/wcقbX[֛72QH *i<TRFUr&Zt5ghԼ?븑4+_*g%-^$םu*m`>5%R6 s̞XK"_EmGJh  'g1v[5W~5ēs!/N47~"u+}r|v,NcR5O<'Z87rwY%4)\kZ!#liIT+h_>JbYwW_v~~]Nl}BŜo,(RٵSlcۉm)y5*R`4c?]>yR f`.c/"՞r=~qv+p ;(ģ9o8Dͱ]Ӱߤ_'= '!?B.5ݥ1l\pCZ4aMeϩML*RG L_⣉.ĸh9he$w,">diG;i˽1k{{'B#i>!.UhۈѠ*k [6K=H5Í\/DMl]"xMF*Z) CK^}ycQUXN i\-97́u1 !Q5hoٟfq_>oSʬVtp;8fa{\Y/m\8]8v|K,D>'2OyWYWeξc^ %ּ"ej'I K6*Cɽo=3Nіg-y">K@,"w!̠ʆ\x1B}SRsߡ8#C<|^g-%5FR2;҈j!SeNR/ggPDnHM^.]/`EyU\3E]GsL vXQc $h! ," `]8,V^XK.s.X[HcC])`6TIjΘb˅&MMh,1R6iAʹ?Ivar -[l˘3<%:{G+Gp:[bfs$+tR0Ly=ٯ" Κ)}?.vϞRB#~]Bs~" H /" Ԉ'lZ~[p 'g֚wNr4rG4(jkE,$ul"n|FO .%pbsW!3U~+{N(8$qՉ_ƵN;!9V&ܾu. 
Dp7Xbc9rmzpX#fiwOψc^g"VHQa{dWػoE6&3;4*HYll_c-5j@&a}X kex&wy3N_}ba{riPGyggx :] ڊNwnRBF4( &^ܑ%,cwh;nN#oμA8_> iEf$6c+.Lڑ|޿1|/fp읪= !k4nuwOejAaD|dlr/GmZX x\h12g?s]qWQ۵5NB}08?CZHǎ j] }Z= ؼWp Аed[9 ;OC؟/gMOt\P^2QtդkeLw9T@q=SQS@_nYm?% !VώtkĘ:Y.(~ yڽB4"aDZsrL {Y^1e4SoDXcB'OK}SZnJ>"=v8 ;]5X,čO #/D6&X{ζ x"<94.MDf% Be@73Fl*/Ց,/I-D^w(5 `\8fbntI}Y,Mzoqp,P/=^;1ȌCC!o>&x;Xp@Ǭ;]܃MZ<C PW ;LFluS쿳isbh3@\GtA׸3iSM/Id8) ;Ẏÿfjbd?"Xwc"ۂbSx]UTH\qU?s5MjCpL NXx~CnJZpRtcUH~#nc@lJt,!1׬ƽ;h 'q%P5DRF.&Y0D5[T6>*߄._1) gVJhۿ=FB#d{;XsW)g6IC:#=6:V*Y40s.LlBj>^I;sx7m~_] ^qrr*{0A%eͭBT| Du9/ҙ9p7p] 3k}Ѣh15B?^|fs3]AU C7Pu8SM SitGKSB޲ AQ䐙kyDPOE <|o ֙%{{8:-[΢NȪpr]^hYA `ȱQ}C0~-^ |2kG|I:^5\fw&B *BuuyS n&1h"&K5I#4icВ {<ǗqF@k}2AgXαKS@ēk',H$3#p?E9u@(V*`dq[¾cu;Ne#<#E>BF GE. /_\tB[M)'U i0н'|dmcQھ3:$g6 /a#,G?- 4݂R4 ?A6tg1h;$ħw(lo~AIpM=X,29y伺d[Ÿ: Wj;76ÍhRs%]{y %4RTGR y o!409.v\Ld Y|/T I7g޳5E7{P &Sh0i̱ʇw9b-(!rЇ  ~U# q 9ay\c 9C/(t'p㍖y['4g977})G NucI\6w D̎9Hb^3{4!_(jw[eRoRC{7Z>շ-Te?qzowİySc=7s<.f1\$¶옲){IC}]Q}i:Hb y=Si=OT|C BΪ+Qx!,SVW>xkL*ãc|Im4{-7!Ћ&yJ>墣,1zu~/fchw͜TKXԨL·p3x|nmqD# DZ`TԇC5.ҦEJ)w >cx=]H%i}ϫ0, W䕤CL|ӮYmzO(䰤`dIAC"C^LPckf֌RRSJ_TTO/ `Εf*D sf+`x=@q[=h B+d,NJ_ڇ>}&+(8;dIqOcd{LÚŜe>YI =~g۱ :bN(P-5sM"}udKFK e=`Ǹ?ZQ2hp]Ќ%18"jQB-)BVe0P(s8t]GR@vo>b<)1"ҍ$R*9hţ>1zSX))GO? ڰ;bQc0@_&|4A{7d`}tLEPG"{ԁa*Z51/55?<2͙.NX| x57AᳺDO1z1uYLMj1Ҩ~_ٽP~:?%j[g+ʧ O ZHll&xƖ} XF':3jo]q(xġxu;\ Z,u0D*lKǷ@+ּ M\t9bݯG6lDzqb%h_:V۩I>ۭ"etmG-MuWzz t=EnæܪL rfrw,;*tT0ƒi0XFM%G4>c+u8'ͮYlS<&s|Xښ_XFPgJEՒoWv#?+–][bgcq,_>e@K%u׌]tO+.RT01cwESS _8M(يFh~85"o8N+=Vgmk=(ScH_Ԉ <)DJhwj=Bn/( X\ [UG U]k͈Z== 7}ZqL=@ԹeC"O:ťFaiJ( '*?LRTq@+3b>cߴE]X~cv @EM$\t-uTP1e%*LB1-JԠx̾9ew_xpNSZngYN:`JBer7-+l99aZrc ehqZI ~P? $ci(j]N?ш!J2]%tUca[#'K}bFC}f,4ٳPՓE'=y$ ip*c{t?HARލė剉a|Tzs@|p@x.$g .n t;,X2IF碍w}e6sG _ϙ)4qiWB5rB /a.zٗ O]xTY>?љfioNM4wӆ/TĠ;TaéM6}fJt[eEEIOC /׎ æ+% 6"ֺQJ%ia5{Ost5΁G"_~VE ?K8ERZ2ZcNUBhWI D&'}{mBgό]5ܹцA:I͚%N)Y']HĵJeY7Qy0nVt(m8KO{Rg4S,c ^. 
:_t4)JG23mțq!f b7wxmy..'N=(M,ҹ2D#&YrpZخ6:+4,O'Ncq>K0ЧnWoq$vv $ ݡ)ܝB5N@f);#Y2PD)gLQCchPt򴪂]  4tZOU*ê#tyr4/,cHYT0rg6ø>QO޶5P9s4,˜@1p&V?'znu7&|\}"_G8qVJ[fR31bߎgĻ׺ЩgȊQD-E\b+ir N.2e&[] xiFa&b syGO^?^ѺԂ$ ҰӿDO6M7ۍyU%kטЪ4QCQϔ/>1Ԏ|hԦ ewQ|?H(X% bL(&"d׎9j0U]U&ldpyۃhf`uGE^Z8eMѢS)ŝRh ^MO#8~te$pI+;3_v-J?2Zh#8$Pz'"mǍ{Y"[}8^#2[glh@B&&]z&9_F]%`u+ Z0Gć\1] +܃;"IFgM\/^3qTCǝ߱+:'dbbބN- P_AS* 7$lA.yuS&6f]67G Qn!ՈbS̀l;w5sS.F=Ya>̺?+# ׸CkxX|@IӜTQMqM̦׻:(nA0ނߗB mcҙ^YYwc+0,9.ʉy~+&4mKr=[3Kjkv d3 kQuKPgrjsE9nͦ$͵>[5tYJ0c ?]б@^{H23L0 yGL UO61IL2ajyXyʸt*޿i"\q9R`s!;soh@ُ -~<6ՌK(dRz rO/%iH’ wpiPįw,.}fڑJ?h۝:+3p-0ݯ43fay FE+>6G]80'o;-'cJ>:/X[]ljh7!!:W:=psJ6HF.1Kv nvOEsܫ0)'D[nfׯ3E;A%ˎ{dB&bcnWO)zzs6D%\S&N|]MCAWV52ASV~;]`5l&V*²@*:R?bendstream endobj 362 0 obj << /Filter /FlateDecode /Length1 1520 /Length2 7029 /Length3 0 /Length 8041 >> stream xڍw4Ӷ]pމXk]-J!zF(A-JKy%;}Ϟ{_3of~,.-xAb./A >\&&= t!bCB<uCTGw'>H/")ȃ=`:7Cp.Hus_ +**;q a6`8FCoN;('7ٍJq0=q =  3ָq={]7' v ]e5@s8/7 A8089AME5nmNnx!. (h sAq~+͘rgg>ybs3wo?/`p[_mغae?97&lP @" ~ @ly~eݴAn~p}t٠k'b$ 0ȏd~0[+QSVzg;ee^//'DEEaAQi?Peڛ1Uǟ`s?؀@s3 !?+Rtwrg0'?7uG,:fM5[{QeC$MՂlv_Cn_/z8޼?nDٞAZ3>A!DqA7Z|yo[7 nH_w*$20#7n|#>G  pCvqrw o˿qG"ov o&@ 6SPw2Ԟ\+X-q(/9jwZVP'kMLQyg%)edrC!*'ȥ{{4V, WO;*W ~n@)}5LVz1="꣱Q,14]OYhXK~N?SOIB%i|74fYSNXڝڙ#oykctjˠ!qN1JD )i}Mjo(jI7 .9IҢ[PbM3v7hO ]`_d$ $קp;8C+p6\Aܠs[>-cQhpIe"9;5uuQ !rFC\-+[nx 2#m,pfwAiXP MK[Ev7KsDrQ}p64D P%Xd\(=ዴ"d(l.x|̉?b}kN,C.”mt`i>#2`2+ZHxU=.(efn䛠<(8@dž2oޫ]xXˮL"GP/Dדqi.}ͬ}ڎ&B%u >yLNC.A{;`w_*V%޾NߘpT&ˀ$^m{ݧR~6d[wѣ8Z'7A{L(Y Рk4d>mCPWS!:l;ii;B>; iR a)+j[v,JĴǨSw5tCcѣjGͲp爀Ci8fqՆF~( "Q+4<ԘO^.>4R7Н?8[\PmKpEΕ(iJW"E`_!.,%\\6x,lv>jw*޾9V(\ɢE|dvHb2M6/`::'e/yDND1뵺eO តPbeJA`F9q1= 0Nݒxmע/&_Uu$FֻS𜊈hj.g2ӏ H5x Ӝ4%:$J$]Aj+ݥ/ސQй*[&\/JQuMH9Ė<JEޕ^Iٰ1Vi9vIzD ic>K0{ a)< :QlYԦ6)?ϴJi=Hɲm,WqL " ?|$9`fSMe|K٦CAˑ)JP"T5f@܏, }3s F fB;W+uLR)} өN8'[^#roz4m{f [+!PπfedW ;9967{4 iBda܇% g2xBmo xQhKrDG˛_S"N~Cq9Ɓ(zOû@:PG8; nL|"+k0rBJP<9~P6`k #(&n;Q$v;NMWVO`3zH꼭o;F0֞EĹQzcdݶ7"${9WX[MV;BM"o cŝz!SV'5h*2D^'G.#_m>2~^U+;}xp;*Q 4NƀXl&"YȜg\fSw8f-RBBx\oS2 
.CS#';*)!AQ۸yp̷鱵5Leh>!ω)U4Q]Mgɟ3?T .V̙;!źm}G΂ޕZgYCU/.'zr&8]HcFdr6SCP)vݾ4݋`Hct΍q >']ϐCޱUcĩ%#Wo7\*qeIsex’ުVWh|J:wthi^p_I*COiPExu-F/_)OgZe84he;* ZNX ~j}W 6J78-ˑ tAR@wyX;ߵnwG=le(u4jJ?Ūlc8Om8F}]|=>tO%>⍽[$zx(=S VMXm+w0t vnza?}.kx!e=Y<02m5G@t:XPU}o &:.qޮH;<ɯ}XڻTٝiP.6_OԥSMo 93.F`1̆;^a~D_]GU$] $@1iw+n.yFV{Xd]׽qwWD=>2m|c|xY?Mq6/Y״ MlsQKe3&Ϸ,+ &BfA׌<]T1Z3 m>eV.pV15&/ıހ+닆w hRۢLJC4XN$&>2`֒о} tALFZa2!Q:em#K |;tϴk^#gCbj v0&DuW.*I!!KLN5t;ܵsS?TR+mE9xtnl&P&XZn8)3Zrc72< 4%⦗?sTM5{'((1bAcr{jk9ќTp^+/E$&N!\|&+)} R}[|sjdXwXD( HE>s5Sʴ~HŇeokHv&NJT4YzۜG|z-y$O4p/F#rC4Rbk <Otӻ=CR$^mIM:LGjMUbeTZMuyAz}}ft[8CP8N6i:CMLckQ(I) PIKA8WL*;E{Rz\͵rpLκoK\i9%Jr)e n~DOȏ8?WJg$O`eC|~y/<`C qѵ@Eo)48x55 k}{.6:37N3UgWw,*B\xҟq?hzN@3|fLV>mT<+j+Xჽa/5y\hzI&zSI^3C}p-#U.m˙R^p&)yd o*J#4֋nuarP+VŸJ,j*⪸di| _WmZe ̖X",Jo".Ckvaی$%O`3aj:"su{3pery"ӂ7p z6ƁWa?|Gq|',YzODs%_ 8Nh 3Zb`?8{UEKea׭a_SE:݇0`^Nк*{_j?,,^âwDMaȐ&Q.FQp+ozn}^srzh.١Y;l[Սɓ}rN|Fa)mJ9H!)h!LXu¾Hx8 WG_cNZ~öBjU|f{qѸZ>qɦFke/f\?j|qg:\5*c㫏Rf坝5=VLxiPN9=jO*@%0zS[+Z/ atBNEr4^B%mڜ=]D}ke%{]y{PŤhMO[IT48j4z .?[j4\l Hw92ܟG@ JO}팸u_Y*]Ǟ1oh r{F oBKԾAV! #uRfIL9%}9Mhdy]*҉ehLO)"℄"~z59שeM#枕M?:!Vy3z0X{?$ iHRFd)?K+~:u 'e*؊j_] RD_fG<"1t`7-IȲh<*YsB@8^9v)|)M* Q ,ˠ^IQ9⫦*=r.$uԈ k5(O#Hӓ ,&porBK-y/z"ʖ!nI5p IcB }v5`A `pH=^m@u[.{}cA4`!pե7$^1h1I%x?awVSU8Q91~0&.\ҿOшYroر?H? 
*:+<,0'gb-JvvJ4'CbVt0 SY'*.n(4qy~9I.pKs.ENиI(w Syª?{GNx2Oo&=O0+ܧsқsͲꚴw 07mpy-Dz e;#iO./ؼ(hG{rBXpi[ɬ ߡEMM"t>aՎUs#1s` N'؞\n"`4KlQx,va= 6%I f/dU]A,&?؎;9&ϭgc{.MI>~y|{yY"J4}DU T3KHoV$Y-ue?B9^<)8endstream endobj 363 0 obj << /Filter /FlateDecode /Length1 1416 /Length2 6295 /Length3 0 /Length 7245 >> stream xڍTuX%t)=p#61`"HwA#" H*%Hw#"H|[}|ǎs]y}Ƭg+o ^`>PQP @|  '`30{௵:7؀FNp?BH/4^;\P:aC յ0`?3@^0_kA]?%]-P/ !(p}ז0=7|u zub@~_?,A ߖuWW7 uܸ/a0_09J9׆5TӡyW?I ~*եˑcɢ)| HDW,t̐v2Ek~g"UW¯2)${泝IS%۶*{-Y+ehV/C}\2IL30ҶZ"s%yNRxOvԄ|) 6pndHn苩2^*x-TqkeUh ctgP1}k EK$5}7o 8-VH=R ;)MXsDՠ37 ,\?s+G&haQ=yڈH7xVqG\bUn׽-}c2$T@~E.ʱ_d([籪Av& #xx=+mEafsU^OFh%(0|\j覬)eE]hLGjsQ'K2L9Ca2kުW*E41mx=bdNlE4(` ؛J^8[P9D(y_{zR6ʐJН•vdt L< /-ܽ45 vEdȹz"h޷'+LfA n?WHx i5\$=TtMnC.w?gT}i^.~:s3G`޳߹c]k{Fe+=br~ܬ)R"Rn\2 {Vp_  ;J[%!W+%|i V)ۡ j}ɞzaý>1*}:)aۄ]͖'GiHWrmr[׈9Ծ7_K3 IȰi&@X /Q/FdBn䘌eH7ݝr}5[!l|iP$D2ֱau?9 !gkTVwEgHv#0 =2;ՀEt^Jys%iE֣\N1 QBh^iÕZ39 qzQ,Vy溩lUC/#*hQHUțe94]Y 3` ug@ȴB>tȗ*[L4N8Ci{OXw'3c!Q 8!u߬OxZįr|T!w $$W2*sr{9$/$6*BwqKOCrׇt2;e5z7 CKAvb{6p1י"?ڨ??fC*UcR6SABRkMB+! >wm(1te_D„нwph~qe10%8"VTӆ D;9=uP6 +~$>\b.mō!6J8P!yTII&X$3g ;č n\c0}Hr :y&=?lnQ{Nl9R3~.ܙL%ZǦNdHe54U.mj9p1c}i|c 2.,ћumqc5iZ؆!tNvZfRG$ }-}/uK+ص\䬡v7~ʀ/~#|ܹVe%"5<)Ӝg 8gӃ:i2#8jfQxeם6}yi!}خ|BÞnQG/f:W-d&};E3\>"Ni ^jqS.N-BqIG%ڒ}@Z/Dz~N)KI^n|%)aelG7D8@a.~!3֚:8s qwyV0fX_Y5:D@~Bjf@!ǧfa\khGlJiO١#/^bz;2uzG~Z~YZj''."~NΎADRR9 C\ʕ%UWT:n~O >Q r,[0#uLCR?_~Z鸯rڻ?w|c? xd`|Z骴] Wo/lĻ}VOxckΕBaAj3}- ҃&Q{K峽)dsk Aˢ3>.m(wЩ?F#R }L<FUd6KȊS^CQ\r@`#(E-u:wvVqƣ.vM< r+ . 
.o K s,/!-zG,"6+=XH34{k1F~XݴbИd5Ƚ|叙C36,pN:Wd4'+Rh2Yn,6^iX} `SKl+-+"YtuSk^ٸpФ+sg%wO]xl"b*$=xr p8*rO,❙UgS*Q[8g,VMը_Hs:|ovȂ^~|}1;^=mr]y8)fJ^ 2^C^:[v}R}upX߁WX fLRl+x$SarqrڡşI34I^pz<͗iZ"0š٘0 UJ wq^ff(% Yk|f[3IZGP @_Ph*jˣ:L T%'+> M6E(~ X9>%0?Pr>v8P<,z٫L4=۩,<1.|+ t/v\}vvXqƱ@ ORDOPtR,[М&==~O΃h-{qecO0=?lqȷv-Zelqcadק)i\R+L:]/Myin TnZ7b>IU0\Z`:$kT$KɤF6y߆DOu!-yft.L ɲe[f%@>}ވ^v14[OdZ`ɜ҇{Mi?œlݤ{31a6q)NC kKRFE4ī&zdyA4eapSh7*Cgu,6WG=A`p'&n^ܘyyKF2g'˼ HL 3z 7.9~`]gRJM D$nqMb;]>{|SOH)ȋэ IaW[Wo+2:lm*!R_e:z~*z_-<ǭy=\ܔ[78 'lo8: n^>|P:s}@{0@XR'ͱ 2^r;h.9KBܫ O!^6b/8L7s]{Zm S8,+Q#KocC9(b:;sJ jSP0вa 6:rAlQ3m/Tnf"оXģu-3}n#^Rh!ÏЬxwG"p>Ͻ] 2z)S3tcߝqt_,t%W]tkni5S\zNQ֗S[Z!B? ZedP][_#Ҧ:ČӉ쟁,|0"/^[/ouU`닾ŋjjA3ɭsޞƐٱު `rb< -ˊ+ ̅(By>2~1L~GXGaa0Y67®J{3H\JLڙWD-Pj|U,rp n|˵DI6s*T Cӽ S>Ns~*WPcLr@i4 :'oX3&zTKҠ|⼀Ø!4D5Egct|wV.J j5[bS5źGh;d]-:\}HŚVCQԜ2Tב}%4MLݼ!MߥzDJiJx#6c `|R(v5:aVh . @탚€͟dc+ bM;^ < y{i^yz6@~~.$QQj*F'vܱ4kyP*a";o XDx5?.^pO}9QฐϭO7a;i`dUc=f/EVo0MS6-|u &fd[`B00oo@vhwऩ9[/3R;&`eVf]fϘYKހDyB,>NX[( G[NJ5l:rΪa)9'Ȣ&~{,amDZďhƥI~!q\JgcuEUuӇ =21o`R{f +P/k ȶ(5da茗CSͤ0[ "Vƞ$Om%J1χ&_nh0%U!i{heg/tO'aaG yA5^X(j5fe:*ύ8DaI;,SںOd*NUbe*<>%endstream endobj 364 0 obj << /Filter /FlateDecode /Length1 1412 /Length2 6197 /Length3 0 /Length 7161 >> stream xڍw4k׶ 3ZtetQ0-F!BAE.zAD&9ky}]ɦPC($F@X$ Tֆ@ԅA@HTpr"0Nc8BJ/2`}*P B5<¢@ PBKU; P8Q>h#߷@nPXJJw:PFH6w:!(W nYG UZHK.B;G ԁM 4tD@P/(:08ぴu-+kw‚+W!w2CB> ꂵ1~ i uvGaP3 ;VB#5; pŸ # ) vUv(8՟  a7G}Dy!}1H;_y !npu@X?>8($Yp7 (k CW/7v _W+;a~|ݡp ߁[aaHcp?6h7e0ϝ%dv(OYHPSؔ*)R@qP$!W#+WiJQ  ߵtPX!8Sw鿪-=B /QXA j#`m忣(VH,E8`7N9a/9#p=;CW +3CK!8VE^T C] GāX]($Da)@@{\%EB\,QKGy;*Ɗ7;-mV< &FQ0PO'L^+}ROLE0m VaSIw>buVѫcGkprLX m8zQM vmNr?;4E` 'tʋ2nIo/`jm; Wore-<^z 2E']S' ޫbKW4e+ssTg0 L-eӯγ =ҩ|mըmٵ=?/V*4>XSgj;3th CKA= !RgwOWr_ c޹õˏ>4x2k+M4M"5t^,F<]7wb2|.ɑjloU?Qj}+1uÒ ldGSD𯶻 >qʡ.`-/Չ7ږ}`/4Y $57y[fј0ib?8YR[TMpD!ILYCL [34ש&⽼ 3ǾQe|Ksٷ# ^^_oak R, Fr7-V=5V9cpe f psX,2'ӵ?i }T3|6 oh=F_6#\O#;M^L6>bu?-hv{tC .O~DrPePW43*XdS 5`-KpVa3X\孅p:w س%ђ9yUƟ}?橞|BNqh[VTdv|ZVtUݡ(Qp,ɈCÓeҮ랭 l-$'ዀk8㇓A`llceϭ(Nu mrg XWYz|\xE -{;PıIu6 #6~O? 
<g@J.M=Otf;r`)iMoX5߮]zsZ\%$**N'ǩD:í_%}ȩTҫ"#EpUgifՇ-UGG MǬ!xWr[#Rb&R=+Ƹ HUXq:y}@ޗ@jGaj;a+h^Ň djP*^ջ څw5L Q^ށɥ\3vRRݒ9hX3KjhY%EQE-%aN& &Uve ],;ܲɤ]4 ')o[sN%@#'CYg$kk/}g"?|MWM[֩kW5H u8A gWåy3BqLë!Vx|s֔d jsZOU4Vvަ_PGL,MT2pq+ e:qpWY=fR\js۱9iFG-inaQNG9x-;&I^ǛW}k.TL0_1:*X$PmW\ka|,R8[b|eed| 4@6ҩA箂B(L5l]JA׺pA[ znFwiI?Vⷂ QuxAkt~F25&C5'bxd_B/k,۰L.l41-vU[3!CbG&wDgOj0lSQջTn7/j5*NkU.IARtˢ~~xy󅸋ߢb6GpSJnhX_]Rv[Ox /pHLc|݇p8V:ĩ @!O9d=WXͥĔw l宍0l\c-)},5XHݵB8.WSFE/6u)1Ieފg"frxOyH(wB{[?!K}0rtfZpl^v<jp_F t\|U=/чh⼫Ȣ. "3Gh(}dfp6+v[cFX ~<JR[%k2$Nу1ǔW)$L#jä?ҙTog*D5CЈH:*5 Kɢ-Mʡ[?c9te&x(JZD|g|nyza0fN$jԒ{x*F&GS vJ.mZ/zkZk:;gȈe&P|;y qU&D%)Zt>_.ZzgۉFxE>$?&[+K7OzL2EnĞ:]7E.0]?R_ŘjڹdG`wZfgr`VHuxFdP#FN)ӻD!)4Nox={)O\w5J314g D9Rf646LST@94bP"" Ք@ɧBQ}Y% IpJge7)$s]pDŽx`E&m7J>xI?UeJrMVVC_{-jAУɩM#WQسӆQ)8{E)ojL=jacLv 9n.,<;.AEDZXAʖl&Nt7tZ9oOdxrW:..3Ǿ4GştA YV]63RM#m6Z#\w$kߦ wu=S3}Lck$K dկ!^ʷ+WҦ)CU]kS6pWMC(dAo^yXq[y^GVnK|v-qDi _WfSy<όL3V@(?`X::{ Mxʊ$ S1Y6 d~V#O5AwXǠb5)sC|?=Qnf%Pvr1ճ ^);\s.ß2,({eS2ϣcv* SǶ}lRDt@=YXmCvm7;ce+'Y"_fIw lGr֙ ;о;g~eY&I: _o+޿d*]k n [%P5Xwo,eoȡx˥oZ?z(kz+#Ɉu:?6 E$Y󙂮>˴-?$74\|psEHE3!RqL˷oK\f7_nN: K~1Ԩ]of_zYAU "bʡ8Z ~-o~AtYgCŗ)|ө"corn12:J?nZ۩2b<MFiXy=OLeGFqJD5#/L h &+i ߥ x;)^G%j(ͰNpNrPi6κ[@S:tGbyȄՓQ3ZDHT D w9ͯIiQ޽/zvCÍz~Ş3+>*sݑ1>`-Pf؛1a2%\E N^C$"~c1wݼ#&r` RF(.,(^1]⍶1&1&0?~arF ydzNMȣt BC|*dyo*رnMCnL+FYi#4i;=g<#z4^J--Ucꯗ^8|5޼4O9ȿ pTJT,:3ț{{-׏+hc:}ۢeKVU($$cq qbD9 `[HᒿȒtISt&;xL _]NˢƖ.nXg]zS`Vۭ[z<&>S?ihyWz|M\ng%Zr-{s}"Kਣ.>4Iscʣ(|ipZ͂Ke7haW(ؗ?TM8+,[ qƞUS\vTZ => stream xڍTZ)c蔙VIabf$%DZEIAIF)锐Fz[}k{>g3pܒEUPH-H eh(D@ a..C@!pPtC1XB!,& QnR%'%PG!$\(?0>XRR\W8@A-(u`8_)xe0%uqBx!0};n Y0@2!./ug tFx mn5MY/Ag IwD`( rAC>= h a1(#zBP/P-an4]D4SVF*\\H;O}J78 {>n B v"l=@#$ Df HB0>hEO3_4 GH|ݡp¿ "` $kwCxA@?dm/[*U?k (o-0" IJD[wK+{4Υ,-@ n_!kk uA8mY P!@=_#Exy?LjpWAxmuïlsĜH.M@+p%8vl29_1 C¶0c@!$ `ءH~^i@$ *C P7a=~q:0D@$ &17aw9M!I c0m´l~6``(g{@? 
@lv ?[?0gv b%;؂@:?+7BȌ}K?+[ v4vPTƊu`;CpX?jjZ@V?/WS<ܰW_ p7F25I:63zZ%؏: L﹪(sM$;zRet%tjS8Ʉt/-Zfኝ͢ MwMf3k=N~ׁX]w;WycIH6g4)ʏ6qo'2,t|*E|Ic Gkdly&Ơa=ZjS;|ƽW7I 28|^FuVCvRg1JpJTNjF)yv@{AOyDcZjmd=nsOj! Mo@WCoG^-WJY4HHH_iQE<ef!(-IιĨTeLϣs-dqź)mrp]k B Y aL:_tp#_=ij9n27דeI+ܲ\z\|li2x* oOVf~qaV5_L$U,b@`9,GK^\o"|T}WF[׀6Z[r"{aۘ\8#V!m߰;ڮ5RL~* ,ދ,(̘#^ڬ?<֬ꯝ1^Fw9W&e^!#nD̕.Ck (A)r\lxI78nE9Tq@$ƍbx4s1cqXz>&bT7XADg1n`4Hu{ j\(Er\k蛸=ýlUѳसj  _V   @V_!RAuڊ߰Jpf5kuGN]`hM{#b)k,OC5sg}S0.QloGm251]Wl#Xp9 H@?1mߍ<~rƱ:gH[R>*]EY]/Y(uuf~Ny?أUsb[`;dC{8y3.\;uY7gOQ} +;V`/T^Vln|_# ;\j~/-rX%P9<8cxΖthZ .a%z8\qf f 5Ѕ:=G's*r0 :(vS5ʾdyN/R N eMo'.!jZqQ.X]SL}x18*ª;vM%_6.5Əܙ[{ł ur86Ww kz("NcHd ~g[gry>qX|M6!Qt:{Ұ^1-&o%#pOJt5YPJ@*ԱK?&_T6kw[!1%kxt$d}[Լ-1j] pBU;)@35k]Z2DP"eavxV+'X#]:3o݄W+h7t톃Ǜ^A%9ikɧmmZ%XOʍ!Y`L1#`8sN IISQ;HX>j-k:"ڤu<{-]5aC5vهR&~ImhQDOE^72OA9XJqQU~֛.[?d0(ɋB OqM泆8g8d\dV'SNs6yŽy㏈+OJ/n> ˭$vcGN#Ļސ4S"y9ev/*5f˙A[DiwHT7)5xp9ΐ(u^ w1M;%9}l1#غfaMb|;qFL.-}u[)CPQŋ)iچu^>ōOuCFwvڥU'V2|h |̮";rsス+qafФ+өQR0L%]D?=S}m %芽9@= g[ `Y{26e"'UP6Cs([Г/[*:sl qVLkJCM*fw\Zמ 7-xc,NN;,7ܐ˒TL6jGü Z`ǙyQwbLRԊ)]&iS)1V{vX&YR}TtCkI=qM28RdkeVA.vD3:\?'XS<i:OgaϷ@\B)GE׌;BΆ!͓zeӜ4]z13G'ww{|Pa)t\802#nDܺ㨜 E^vӥYG'Z7Xѥ"'}8BQ*_2X3$jW/=ꪙ}__[ӛ&1N}axvq+G_ = z5tgIE4^Q؟hپ&Ⱦ>"yղ=, aDp:cu#+ ^(hy4q2^Ю+1Kffp`b"i@x%oRJ9n2^qWꫴIJ~f. XY!bQoʓ:` NZf%&ޯ=zwBZ$݂pMiFR*jPڣ_]h͘X {^4yVv֟>+k0蚾i\T sf>UU%wFC`ΰtd-Ce{#\hZP>eelj bRUvQ<:l$!7ab);Vq)O| 6Lң-3@f} `R+dR燲6W{eҞT9_8i:k Z6h|k×2,Tx epرʲUg6^ƍx;\qVl͆Ft!BvKQpuPPnDëe !rr)Sz T))HI9mΫ$) Z"ose EK@,*}Է{iFԅr3骳Ǫ=#/뾵OZD0EttZUG eONJ[Zzp =mZ3V 1μ:}"&ohdf99E]{CGJ? 
|^jdr֗rx9u#nݰ1O&n>К-!ܺP&M\YJ%.G NrOt 'Qw)-}I,po#(GZ0u^UJxCbsHfթDފp N`+Xϲ=d;pۭUk2\('m%䛽?1Im rnIc0j/rTDD$t%:>;6P0y+"C$ObeQU6W qv;v9&<ß΋ ڥbYuwmn`{<!´:?dr@X"01׮EA4"hO(V{[q@\u8%.U茔 4MFԶF܌Nl<'EֻR@:+xb1Vu& &vtY5IWyhArdQ GC\ ֎r`.^B8CVƷY%*EUǝ2Bd.}'$ wrn8){(x^C<[~m"yUwsۡ)("je'8 rWAp | |k'v9l*x&>Y< yFv֕Ky+Z%r=ˠ[Yw#OU\9۫>=<[6g(Ȧp e9 k?U&ѡ GroR],~}Lx74^l4ۺ&vZHw:K[}{t(vWgiY`]D}jRqpوN>Wr?竗VsrƤR2I Z\?S%F1$|rʀxjmDUC9_Θyq{<6-ql>K?qQ_TɁkr2K:4 kcEfG]NAAz6gqPFDoEg^Q?Ѷ,S[P::ѡU<*,}-C L>G)8-VOd8Hīױ@E߼sBu+xR^鍽>gwb} )vC @/hxEr5K|wnQw 'w/ຉ5߭' e-"~p!n2h#͛x%uF @ E9D4;va*D[tSb8|R=1mV3q(cwΐroVт[ЕA捒>d0"8MQk1 n0ڎ34 K.qfD!k%G gKkE`.:WJ,jX]7L7 G[6WZ.S.ametf-,K^pP);3^ a*_׍Dm*o[pNEZiI$ +)R98=g8' 4|_*o::} ֹ ZAxW@1!~2F*>~sBWkY}J?' ^ί$iLck7U}rH˸ų +{,@+e>߁yn+ty(}۴'Rq~Ő&[vIzKh=:zQʥ@$KG{M[)gA[O .JVT?LtXq16pU^X䴈S1=csȇcdz0ta1:zZw30[k/ !eR;L^BӷBH\#6^N9lܨG0\8THi0fZ#ͺ}*s%xX?}Ժendstream endobj 366 0 obj << /Type /ObjStm /Length 1650 /Filter /FlateDecode /N 87 /First 755 >> stream xX7 Ū pZX@iq{W4g`=Տu\\pGso%8QSd>iӜƈgtZigq1<<| .ES\WuY1]59FW88 涚]j"0WmE[p< MvkDC1H*/ʼnL*^us4J2  `B 3̎E.%sk(0W" +07X1Rt6B]xx^*ȢT$jN JE xI qQa' ΍ n91LfEL9K%% n5?抨EskCD   0r8"&z#(5%zS/0 u.f!z/!T$'s)7gHܫف սy1!L}$hD G;a* {2@D݇Rqo#PQǧ'QEʠgv3d5 J@E2\1(}Ս*.ko\jLZ.؃Oh7K8vZP^˲$(t xtrK}KMk\hs֍@4poXar^yS-2jʎN*gMH_2w4-# +ur@AWZ/vY(u%t wea[m Z/VN&ĎgPs^UǾՍ:_ l1{qиCmKԳ o*L&7vm/mHc9$TպM bZf=6yYtĮ [JˎzGiskY'գ#$4 , M5Z^ GD ZNn '!"mf܆& h=jĞqR8q WÊVɆ;qAU"$T)[|SXymOXA2&bgͳyz}!,e>QO@}}h* nњӌn})n[?bL~>eq_H0\1ʳa2 [?ij #Vfc4^7ݻ4hrAn[o8ȡRw/i6UcUa{lZ [8:.TW10&_5P^&hz4i=F,=Ixr?={gQ9C''NWu}Ewz~΋>("[aQgg^ (0=i0AtK{QtcYh0;d}ۉFbuK6ƆZҋ'B162zEiT/GV\h Wק{~?=_܀5wu,+ `y\$-a3Uݿ@+%cO`OzJC[|Hp+"KZr^>>ѥ}B;bYiIz$}mg<ݟendstream endobj 454 0 obj << /Type /ObjStm /Length 1505 /Filter /FlateDecode /N 84 /First 731 >> stream xMo6/3_ٌ/)sv3+f|iWrDc=endstream endobj 539 0 obj << /BBox [ 0 0 504 504 ] /Filter /FlateDecode /FormType 1 /PTEX.FileName (/tmp/Rtmpj1RpeH/Rbuild59c613e21630/kernlab/vignettes/kernlab-kfa.pdf) /PTEX.InfoDict 103 0 R /PTEX.PageNumber 1 /Resources << /ColorSpace 
<< /sRGB 106 0 R >> /ExtGState << >> /Font << /F1 104 0 R /F2 105 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 1094 >> stream xYNI Wqv=&f$•f H4?}_$r )ulWs8 tBSJ)ĉLYen޿ o/H1pys ?Wfz_RÓq"ZHr܄Ӱxs?z*zJէ_,;[T^IijP̡'CF+"$ѡ.rEuNybusc=i])FQuD^s49 JZbg]-JFb񨑒KPר90ČY=6Rn8o< ȻnB-2FJ|7~tʖinq GR8&jQJ@oԤ#_y^Ruw;r1i/YhulN SDNB*C3IuY ||[(yx ~!FJ 5t6ºpp.>A@ӇXY ̑wp<C88ؽjЭKܺC93=fe ȀbaPfz0<0~fx#= m:;fkl͕yt̅;bD>ppPo⾄&U%\u+uKvR8kRmL,G'ń;FiIrM3%@ݽb"O8T t'!^4uࢭnBlO/pxY"5y'^{z^ un/}xZDznѮ-G*RNڜ|uC|!{|j_lZ(wLp6l{]\lr/WRN.uí`C\zu)f]!c0S[b}^c[kTs>>5)/xvM2dS6s"A}:)N.|qy9lΞy7|aendstream endobj 540 0 obj << /Filter /FlateDecode /Length 609 >> stream xڝTY0~ϯ)6ǎs @bEo>ds#I߾x&K9TUL'|s#؜ ڻyi22fe%:lV[nv2zd =fԯw|+^Jig*!P BP1$;W9& ֣(OZuj= sqZBT=a%^IqsA{,/[#?㿠XݱԖs*ŕ(FZo)X3[Rb )Fn] nT"'(̏šwL]@r nMYqLrs^< x%YƤ2LYz> stream x[[s6~_tvBwLlM-67} %KX[ :Ĺ\@% a25B;l`,`RBZ)#UB3ZkV(:N(5:^(+#:A(Ǚm*),f:)$.&tZh#$YON:Za i]Fj t@R:J^ Fzǿ, "V\| o|)\0 A8%ȟA s`R#pr($_^KP<@G 9z CǠCޣ'/ !zd "(b\!E\BC#FD-ٱ"ͧ+`N++ੈF"F e!'a&b`P= gyq̡Qz!e);II& 0GRN4H) CII8C{% 8/SUAU6R㫯DSOh\4j8AM({˺W~n9`,n}t,tM1)QuR& 'OD_'mT"VgJL/\(.}LCFhִF|h4;DFs@{h4KZ>@{fFӄ~O<Փ*P˷Ű1/DF4GMBӌNHԥ<sV F5cmiwUoǓ_/@zAGt8l& CCxG!RLjy4R~3nA Ri :c XN1H8:Ll6!pW >7?Zh<=/i٫ƽQ}\#"9N/>y ?~wx_>_B:vݰE7kgNjGmc>Ȑq g}V `Y@ᰅHn3\sRB=GE=<#r Qcepfd Qs.c6MڀdHviFP=OAnPO/t3z">>N@L^$nYmtе- l)8xE=D,wxQf5~GTffu8tԫƩOCGqڦhoLʳrˠH߽>(q(ZAeAuUn{HNģ=XD;X%wCܼlNi"8vTOuo,^O'pK_\?{Xo%d*V}mgUԽR_ha{x寮=N?e^yaww9 t9Bçq}5WV4˜Mz8XTt]54x\lT^yٛB]y~Q䓺 tPFPǣxhNI lZȰOS!=d_.Ogh:S<؜!7 iqʪ0gV0`m(}@Rl p׼*q/bFʸ۹ e kА!_~2,xpE$^[;߅n~suGwHJwq*+ 7$ZRzJJէҟΛ)MeR`I4O_ q_us)(1:L l"Eܲ~5IbaٴC| ' ǭ[>2Snd(͒eXe=4` N-Vj,@T4dq |O /ݜ@#}H&gIȊXr 3O{uj3׸6xXkc-mj)uGk+VR+otEYc'd.qm/=e* GծL:؄Jot㳪NlZiƛi9;|Q.i׋:RAGwfa@h.N+l_.O#S0S}ƓJۼ7"u@gD,7"^wEz~LP +p[/ ZZqm6ghB 0,,zk!UHmef+w+eƓH0BwNlւ.[\n{Pp0.ƛ[==hrZeHA{k, (F$> Jzk>S!pKE$r_劇3I[t}ѿf;cWH9t axreAV+nѺ$ɂ b>n8T~I9.`M'8#H)U3Ex]];=irL2<3~@3L.hE,vbaP7_ ޥn KN ïut$@5ߚfɈZ 8yRwˋ p,:?̏wyO*?E͇L10I>9UU>/_oըLj[|%˳0micNg;ަ쒿׈nł6l~>)u tT\1na"ױl52ы2y5=`;knY&^emp"gP2OX+Y6 
2x*nRYiiFe!ۘ!"^rt'feYŮ|w~`wcN)endstream endobj 629 0 obj << /Alternate /DeviceRGB /Filter /FlateDecode /N 3 /Length 2596 >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 630 0 obj << /Filter /FlateDecode /Length 4470 >> stream xڽ[KϯoR%bAʩrNbOٮ]83H,j>䷧R4aF$FF3MW神TYUbr};Uh*e&뙘6O¿lR,۶܀; kj!iG_]# 'DMr\K5+,Ln\[Ki{˿{)xMا ͽY̤so80O؅Hyp9q5e&h1 N4,4JCv?͵ӯ2os!ʸw!bѰyfe /oYeU!3]iGǻh3I?cJ[7LlÑL r]\&*E1U Ǩ]UA r47WׂcBh|;RY/AS#R_j-rK!p12bv)AL8$UEVQ;n8 :6R)Z:3%A#_ 80F>^-|lr5WZ8q1D H~dI5y{E@c)&&MЉ$@ӻm;_Sn2&ᘠkhmZ@-3F1f32􈪆Yl|Z8v~!n$Nʎm}ndcAlRX1H$j/o,ZLkJp4o5y(*N`0O$nL0bN+l 2qUM"RѥW--hAIMR ɊRzwp`hwHN2z,8x.1LWNE+cM՝^'n[:D"^^:/0 - ڍ+wחkӷO$.1EgPO/4}L )Pe iW$^zs@ޞFTF~b2g#t\:<; FUr⢍;=T} 8H{OeJUQV.YmWc`fɠ?oSPG'"`C LjLQ7ұ}dԠSZ%4;BlDZmd | < f .5O`zp2P:1l`6)/Æ}<0aҖѮQסêsp%`M-´R?,dl0.wYdz]#AazoGiZk YmG"0rArIyU3K9~Gg2Z/|BbJ]t*BF  83lf !v b-4E۝F~^sa^WD7l!XejCgJ夒WyqfKU7.Θo%s>v[QfoD>[Z##T3-m1"jE${2m}8i,8E08$cDIgW^ 1Q&J"6z\9,8)o&\']u<}k2AeZ 6`"ټ)fEI] Y~!NZOS7,4MI`|R_Mjdp6 (d(C@eeH`OH`sUC}~)w&LL}&@2iP,.(D)j̘_&dJlL٣鬌kVT J8pu¾N' ,hsIG2?dޑD*F5L"TIGd,sh ާ6#[ӧʜi~ARtNI ¢~j2X=c= å.'ZpnOh"ݨgb:ECyڑŤ\RYF2OR1bfa% /'*4z ]gF8r4 ἔcs|#!{>|k/9^}B,&B9LL$шU$EjQWkg!Uh[+DWUU\T u28UAIJ_WD]Ya$[6Fܗn$<#(%Aʫj`Y` S:\DGn4*ӷ^3sz$k/MvqB4ā$Ʈz!&\y$EozHsU2v$D0A}Iw1Go![%Ab5#g5墣mxߏKqnw(q VssUǿ4+xbZ3%cj1?PQd2.s–+r?0qF{sy(e0D wHdlΧ_e _VU&6]Dl[q~A# 4vLX̴/a{rpZ*)p viy*:gdA ]./*SNڪj MĐ!OyAٺd$ qkSTNE5ܸ>l8cW\ɩ-7IlϾw$m$fdXY#P$_ax&ʼn\}jCʝp ]H{EQ;7l B"yo,Fg=q0ِ)0ybtFܘL#6&bԠslsm[!8'"e6%{.' 
m2')xe1߻1 kحkpH &QT ї}f?4ukV<53lmQy1VX2 e!aǹj *Cx; UU#qfVT:Y})*qv c2o9쐕?Ys*Vkb,a[;trSc=/ 1eݪ^byYL!eCǟھE7/endstream endobj 631 0 obj << /Filter /FlateDecode /Length 4029 >> stream x[[s~. /Yu˲Jr68*` r\Ϫ,qӧA"iclW[DžR;H6nA˦tX؉4yw^Q48LeD'cN"09Ӌd4 0;d(\:}ϜLEBTi!%RW5OOe>h8z!t&KXH fE'xo7G \xbk?2`i I&puʹtd=8z 6>P6NqC(='/sf96 -Q]%N%ҭ}!TB2Jf|]\<ōcOv:9[p66s]M6HẏwVC`[M>708 .9#Z͗V ٚ [A<[YA]K*AV1.(U ;j>v6_G:KTN}ɍ;LS~ hfaD w"qFR` ):utÒj7' Ş7CƋ瓍B%/n֖}a{HlYyR٨`y…/ ^'7Z'/Bg#nzӸbN Jf4ECP8G(Hd}-E?n Սלkݠ@V,b(ڒ[74żHm(hSF+aj 38)iNU+xEX8V;ѠEDAn. `8o F;ӑ.ј]/O3׃+BQ&[v5/o/MU2Gi a鏖vL EZ9zCm}hYce]!yXŜ_5959Hڑlki.}|x,}!c|&&UUF8yHq+v{sB0ؚKpƺaIy}~ q[`#!@IbaXQ&7Dvnا~}>R(w//;1c8j8j%7&Wi!&!{lZAC!Px砿vy)qN6{yAh] _o+糄Z~O%.y8,OS q>7R?د#!Y_,8x2=R8؃SRGAKυBcǙz(!^uZ}LÓ6+ ${9\6<_@  qNJ5 ]Ő(g1!顫 e5^e-},e!Y}A,Huҽ( m^otW~D{\t*-=_YXdP Nr#9 =?ZJMWm7".4>z>-[Մ4R rŵoۏ!6y\];/&+WrN9rаFobO*?&9"WIJ$o.ܠN;0]o8ΥȘNBv@I%W߇r]ϛl4l _3/#$;<ᦐwڋ`l󿸏 @q:#5*s=A'J05u1Ŕ 7fA9Y//QhQEÁ*oE"5kD訕/{x7rE_I"}%&.]]|4xhz'^]I ^o]ܿ8'P*`<{?`p[ +. ΞDj0}eL3D`lnώAA;)7\9mrKr'Sb/?rN'?uendstream endobj 632 0 obj << /Filter /FlateDecode /Length 4070 >> stream x[[o8~abU$u-0$N;i EƊƶ6bQb "):*DTR_f4>|6UMf\Pֱ~3ľ@{C4HP qu& aY=#/u.+aǤx Rzp\#b|繼3sHV`EGw:1G2lP:0iڰWQEC(PӜ {O"(OeѥTdcWo ˃4 .qF<v}PG|Ѭ$G^-!/t)+ n'>u_8ӔZŘ 9'`)Ħ3iD9d<"gP1P9HiNnaN0* &I#i) ͋V%["id<_Z1rg LۂL bڥp˴PL Ӣq)l#^#cפw #ǂ$^F[pV;kj#/G^`55^bT= \h"YFSmxT_fYM8fسUG%We<m0y6LRYVL'~6?;[Uk.,~o4|0q[ņ0^cUAӸIүkϷF0?K8ULTRL;;η^ _<%cmJp<Ӯ _3O x2vv gY^?P [& S b %¶BH`7oyQnEI'<^nHh*jƒa8bTxP)oQ֥ZYy%;@ wڗK*qPmO.CbHd\mebf nSTdx5, HTl$|P@5t$ Zqp- S*/$?2T1 ƏD8YeVz*hl֙qr'w3`~%2{nzN[R2E9S:UQ%Cz:"GC )u'l*pgpǬ1cĀpCx.~]rpGb+gX &4A)G &$<d1Q,y_,*{sg>cKԮ;*"[u$鉺'b_{5nh >:eczoz$֞@}P+ 2sK~B땐2n4T[5(J |2[?D8ر?d2A-#߬ 7j͡j[K \;&%[;6vF\7~xBN1<`l 0RmCXprʠ&[byШ]N6f|{|%i/[=z>t7<@ZO3ZqxX!2dp1J=ȞƭM:@F/pÔ<E:ǐK1׌l|ך˜{)<_br˄ .ofX(F+]RNE9V;75*Z2i*Wы W>P; %9`= ^-!Ic# p bܗJB'e˴ Q @"6 s1] wLBkDm.YDKېI2̜KIvǒ;WQR֓5Tod`kF$͏ڸPNh R8Iv1}R x/5;". 
>aQ~৅MWqST`k<٣Z_xo0`XSiqPԝx6CMŽ{[,fn$3+ED)S?~M 8G֬#)M:m/ јr<@쇝 wMxFuw C : >1$^B;Ft~ؑ~ӡF!lNj qL6\pXPhuKG_:ɜ3ъJ^[xt#[&:<e){tr~v5eICI~մs(sPecuNCH=)r{ v`δ%gz9ar_aCOtqN(a+͑  2^9c &+!!U# c/|D>K'Dy| =1I=~j"2Zu /X}|6#+$N0߯m=j+͐,(0/6+FbBA؂S\!Օ>V]i?$֫Q"^ݥ݁53dr+mw@jkL:A3E_Bİf] f{|NS̳z"C59g%бb)q+/nNsIDzh-zju(i N4<'^]2IQJ+ 6AT\o2.ʱرnc!u% /<ܱvH{Gd;v|,׮@V@Ȳ={Eݟ1A8#Tf43Mc'M5c2s2YGX+gx9oۜ\?ZG=C_DrUk'ƅF%{DҜCR_u>ON'd1vѹw|;+02J]9 od2Ǯ}w=!gy#6m|U+pq. 5]5a(xfOI.+㋭9^JG@ ?rcfiǔ _WU𪪥tF_Xw^`͹XWtlz?i-M$U_; yZD6V3˵9ԞīߌLM2·yvc> stream xڵXmo6_a ԌHom͒4: 8AVnyٰhx${5jŭ{/{Y+WyjV-ʭk/ZDƴ:(mr@27mEE[G) yV4fY2P#~;%u7DMRh4&[j23`66h0*xK>[p7hW;-il/uh(yzP[0Rb]ًŪuw,z De|hg6.h簍4*aq;V0駝Jr06X$e6XlP XM}1c c|U[:>J،Lh|`>yKyuYQыHA7$UlZE. 39Cɥ .h}ϝ ,˓`D_y]nX$wB$VA^e~6X5@8Nr.=e;x?20rub_#\gy} +[1asasnF6u B2QBNZ&cv<* EC$ wDO+?j9lT26K)N2 5nT7} !-T ee4mpPUˎXy('*3A&\sPEpbk%M!;,Tƶ #T ,e;S.l)'W?N4`ȯ'& \!Nqp:S.$`Gۂ7ǶƛΙw7to ߲WPNvQ`JΐCe)_8$VlGD,v耰07ʼځ"-ԏԟ }͛/(YHb~v|EczE7YUR}sJn_SbNTnn0HVRc:Lw'SoVW_1uqKg7g9vQ,-_P,_ Gok=rpy]Չ49oh]yRb}[9&RauSpI9]t ܅ .H!7Fǔ;+<(JCB z {\n:(׻ BآJ{@S* ]DuNJ6bPDǸiJec΅PĶ>#mX ԫCUChendstream endobj 634 0 obj << /Filter /FlateDecode /Length1 1379 /Length2 6066 /Length3 0 /Length 7014 >> stream xڍtT "% %=t J 1%4 HIIKIHtH19߹w{׬ͷ~v|lLv(G %Z b@~~!? ! c{EzpPDB!(O Bp P@( *) &*A`v@-Pz)"|0GͿ^@ 1@yW(f  (G+-hAQ(!BIy{{  A C9Pka6g3oGyCP =p;(n4PAP;]W~i"<`>*@^~epx ;E-_BH/і_D;o @p D/G ~ݧim4D"JM ljߞUznLm>Cy^qV儮 O/+s3^4jIk iaovo|]/:^C+!Θl℺g=*>ue Ñ3z[UwMřLNS1cxqHML0's  &O-U ztRRRc 9]Db{z˙F6"p S[~5i˪kR5Rzh9.\! 
ۢlӈ(yj&5ӄY=^/|2J"?%CX sV旉| SlT"QSE60F^ y-QG+re\BE&P]t6~Fvb !k(_gm+6)Mi)qgFm{m˙ݘ^ UBJs;EoEm^|(eެ^ԃ֗R.sA5e*+/&}`^73dэ1 p KsadDEe)nE+UIewy07pJ)Ba0HeB2U'8'DVC6o͸/ IjCʃGOb.VRo!_6ݭoTrDHe#W.9:>K ^p.jMLdgjGrs3n._+ٯ&^]+g&ɿSMft?kj"R :,+8&/Hvݽd ~YLAO[«u Jߪ7/U66\LIf nu#v=U[)Acrx"J3?.T뵻CGj*4D߰aVkw+|T9&GŬSùvJ9N& -Ǟ9'kɦ9ݩkc/ޔAqi?{<WK1ip+$H<&kVFSsb>1 @U{²͓$I֑-mGWcU C4z"wB򧹉ao-j;ǒy'rdQ c 3]QE^l@)b 烶 qR+Ӳ+IS17MtXg4>/˙o(9W$kYZ'kH0m y.xs%Ru΁ lCɴ)i:ßW򪀙Ԥ$Ղ6]JeS[MT7qY[Mbۧ ju FZ7-nu;-91y2~m =юS/+_X֋^%k4ڐ{:wXmX28qqF`DߤO*͙l(16x]LR·D"|c1mba?\Xgj.=\)?Qۭ)ѤκU}J.%.e}&"?wC&.kZd t+3o6ч\BeWNw9J?ݓ +;4I}OhyzR_9퇥uQTv>Rt5l |~bPpkg7Ak,LR&:~U3 fOOʲnT?~ 16IH] Ȟ{G`y`DEz=qEmLFRp\mǢKkg+5u _a FUM<+aVwAT*񂘪̼h-pVkuVEEUݘZRkJ^| \`7K/وh+5}%cÕX_6WIBy678οs4i?o Kx$o"sD@ :J ֒iEXW˕ivgp3wp*gr:S,ȸ!!+7YrS?N(ԏoYJV,;xj<ٺJHzn;ԍ,]%#U;[V[O 1ιvXbXO2pg64 S9cm_tiY([)bݯ3Hq v~-XhtE!uA_o%<.5dNX+ s |W^M0:ypTEHY6QsoAj-fI̲؏4{k;zfO\Ƣ# U4 uDb%?&  6.o)g㈶h)nÜ3TvSP|al\R /峭*mEЍ;i i%y(Ho_WT<Y.~|özӷٛ\\Pќ.<>6c

8D)]NZ1&b~}QMs]QYsg^.K8B`= %3/eTynX;zRic4o< Z;i%lf.šoγ.{{FV iJ_(S m-y{K;Ead|Qn+W+9Dڠ쬊=.>Em [^9Uw$|@*W؏wcSRߪ mz Ap4zZa ?+9ɢRL;2+ܔ,cde/}̐k/b?I[BOfq5%j0JfەױE#}e_UodYU 9,v5M+dxWgT^ʱ7'}~9Z)‰՛:ZQ=`>ZBIS/lE@1o*g->(Gwd)/UL6I n"3wȰG{N=;惋:Ҿ̐x5j r;R+AeAVca\ֱ =ݗ@G'7{ɉ1lJeq7SI myW~ xm|vi&!ZMC" x*a2ܪ,6t|C83NHR]P9ͣ' C7w<7zT{&eLD\fݍ4RQ`뻴*{~ˡu"%+ Cngf.L.4qYMc)OoʬǁG~!:i 4Em*q8EҮ#[lMT/|?X0)LlkLpJ|KA`#eHdf4e _k]69\{ h쮠7k3=.' DkNp}| HOU}>K%ӡtḅaUk8Y8z|l!͞*ՙ;H'hĠڂ횑8419,IG knwʏe${8fk(e%Cf@SA TH78Rf7%26G*)Iqx{эg5JObK{~58Y߉3(YhxhlJrσ5|$^:M`Rt}VtmF؍{|M`?`aOK<F=p"vǸqjMz\{3}CU{_5S5mK~jXɎCv9;EHds|{۟9:U/7׍:_.q{OumH~dy=4H:c.QwN9B0t[7dk>Я:.<)UKs~Q[~{[ }=:mf(딏?z[>CV\O,∴k7\%t1ዉ5 )x9LT~kQv>2RHF!G(zWñ+DIi{}1(m4 "pulL@'ƙ_o;%Hފ ϣOL@#Vq1{=$(udlg&ͭ$p am )}Iᔛr+掽m"Ed+^&#>oqTM1V;:KϽ'w`> stream xڍw4]6тDFeFE] 3 w;E'z zD I>}^ksvgƬ#AZAW>^8@ASH  @ll0W8/1(D vEh;M$ DA ?$/C$JvA5$BtBl\k pv9BQ0k0 v:wzHk!8$\]ā@^ /e+ t.P;U0@ S/@Gq"\n@oS|` Bs{e{m|J54To<#$ Ą"?haWaݦe8N?ci!Ѭ8&Hd?Sῢo$jG v2@=H ghpT]1CD2 цZPm ץK,k&o=7R a0~!a{>|I@=SE ].tq~$y rDV#1߈ODDq( Bo6K}7@PkDgrt<_F&پr̡.^rVe<[BȽ|Mq,vs^xKN_Z fǨ?l1nt}Ռ١Ɩ&JG~ѫYW:?6EgJXt' ,x-*s>+dS'dn^p ,??^,we5a:&gFѧ(N-,ȆbQ,X1'Y{oOJuV}eӐԊO{OӾ;xOH"PռƁD{V L_nM, Z0U}>Gډ*J{WEP"rt8Ho5:!ֹ{DC%ڗ3Fzu9$3 goM:iEMΚvkM8@uРV45rS@k|U/S*:;# zr=/Ea,a&QP[ Q-z{'?_짷OvRe20){`ȶVRqU,G&?|ׅ2;΅(UG$'Ho ۷4Iy_SJi}WRE"}&:g{ѠQ(m߁Ҿl-~ҙ [#CqEǹ5,ɉ&簃#qA"iۤP5H"Ԓq*H{ lJ|,@H;QrѨ:pe=` ʐO鑔Ҹr[]`#F-vR-uNj%cOlZFYIore utl7aE݅܊4(rT'*OyF\MǍh{bUt*'SP$Ҫ âF sC-KEDQld>ߚq Ȕd=QӥoqQL^ E8uEpoઽYZD [F5:%3N LwB>lN/^5Y@>?(-+u ˜!ieaQ>(nadb&n~X\!Y66O,%Wχ^drz <_86X?ר뙨aB"?]-;}LD;Z41s93EZ2&(|M=MwkpBBum .ّc)F=U|!xsy2;oBr/Ec2R(]x0 ET#h"J, Fլ=wM`\'DsAƮ8 ^:r!J,<`b_ta Ӧ!S'Bn% ^3Z: 1kyrvIOVKtᶘC'ƷbӨ)5moalW'͎g[%I^ߍ"XYS];*lӁlMc{ yZte>>KRzJ%6Ke!1׋5C@noG%d cM)ڪzOxGmG^KS9E׸ne1#(>)M1aFclkqtGj>S?ۜc9}dJw8Čթslc8m2G,=ih<,k‰PMB5QFC=y qn=X lM&=am;pbō'Rzǥc8`p+e"a/SV}7Y'AlOgQf3}WN,gӥډOwlݡ{^w΄3w 5,#ɡ|docjT@ jwֶٌ_-{ЈQ {E͇@/,?^lUme7-ݳ.i<.R_R)*y3sK#&<4Cԩh=(ҌQ~w|n)6+ZeqgGvKN֫oK_}V=US;3ͯO9V x.Q< L\?!BuwJu9x"/8ڷҥG6 awpاzuq75x#>ty܍Է鬀1ZT.s{ƏuDd"9>8$NŨb4.f[c$f^|>ը>om햴C #.FJLod)p53áB 
xM9EqD'WO+*HɅ&U5aoW7UvX}I@YߊOVdG=U͵T1џ>58'՗mA_1j'+*%HTnGWcQitdx3g<|[za4Ve. &-ci%z[K0dqu7P(?_QQىq;-c!6ƥk%vt|PK-3~*oLHYvȏ.:e _^{NM' tDj|X.o庥p>6TMJ>=`h3]Bd 3l. _32<8,Nܽ"w1O%?],Ȼ%w8oIXaBzI$pdjMwOɛJ %}{Yz4(M0(&=Ϫ>:GXu'dzYeD]I~5qxFP,LS;k)<,E M4~QbC5YHHM+4Ť@{ޗy(E͍> ǯ SviQneU!EzCBS7. 8q$.H'l#[UV.(;tDpn,U*휟GI$JZ#>0kSCD9-{FG-g3o&z:GG dֺP?91p!eK|73x8TwٻSghtmF ri uVK'psw`p475U^/KuG1{D*cw-Bɤȳ_J#e}ɽ+9v6b3' &'OHEשUS=Qm~0 .qܺlk7"Q +$<0{22CM|fzԪ,KdT.BZa61ŶkŋȻvϘԩ*B*}sFWlJbZ01)'stil bxLQe 2⬰8LTEZ4If]Lx= '^1D,/>q*׈=ۇ> L1((|^<8uyZŴp+~Żi|':t"sl\>ZCX994 K9#H{q|[L9(*qgR8ÃNB<c=7]*Cv2+j2c=qs,uGv7ojpY8My/TXŀcq̈́OLGZyyl [Lr9 dJ{8%df4mqB>fUAbX]:_+OS6\8|o"+I]z57n-ca^[fcG(-pJ醥WGWECrҥ \ eGrHrp.^.4I 75/`9]0) .D,kU~9Ei19 nz}:/#~6nLnu`szMT~N(|ǮԘlOai_CYZApMƑ84@4D&z|,x40X3"ɾШ~^Py3] PhBuz}3!ٮ (}DBOG%5t-,^ Pft,C!11]+'2Wl4y!yƓl4p~-e){F+}=ۼՂo/:=:x1=5cV^s(T\Q3GKԳD;_(vH&s(QӓUQē(N>MS e08+g~Yb1.wDZa/PIFԁٶx)dM/o䲕P␁ ;;N:JP?c@3J$ eM5U/ʤI[% kL0ev8fƏ='ER2-j~V &qa.E`J+ɉmgisd\eA!o8Z,|f4rIgMEMx;l3 8QpMNNC* ۉ/\ HZ2ʜY!fM?4hK`ZqG]ʻ#K^F=['L#] W+7(}1s9E͓Wdw@.ysazNԤ:wI57\]vS19` Cr6K\~;O\Q!*|᷼U=;W3@Vm<`ȅL0$@a@V^t4u*/b<}y PĨy$=)wkz;cU""IX\3Zizi.~xNY6-,avFnGMdIqShÂ:٨Ht`خ?K{8fOc@@v&"Ɠ]tDӔR Rm\!_MOӍf쥍v^yF?,duФ+xnO56>(0=#k҅cb=J*[8%endstream endobj 636 0 obj << /Filter /FlateDecode /Length1 1935 /Length2 13742 /Length3 0 /Length 14933 >> stream xڍP !h]w'Xpw܂wwww9gfUVWu뱽ϳjR"eZA;#3-#@XVLKJbl Ktt_ha1tHXl\\ &9rD ]-Lt);[,*}PS99iL- m@ v@g)AclEOFghDghGIpp6(@rr6?T-2+ۙ::k cG 6@YR o+XHwdQdCcc;{C[ [35 /&CL05###O1AEᇾs2vwvsC!e>6YDh? GǮ{yVvn=Zؚ!Ş^()?633 tݍ(a=Cd 8;=_00v,laa?Nx?>?~?."v ٹ&WYJ:~̀ߛh$f4Qp66U1`@;'?.-#}LǥяCKۙ1]LlCGGC؏#@OƏ14z:[; ~0Ok8~q?@;viΘ;в&J׍vowtO=sɱ :2N0iyuGV`:%AE?Nijvq"D׫x3hi B.ڃ[{m_h~%4K4mj_,9lb(gZ|*KwۻRq԰ާQZLяs_U:?~::E)t,Y\ڿS@&e)B)¶utnN m7]ߑXU?͹ ThN2os0NP6 {XY\4ֳgWIS:+WQu5"u;U]"ibŰ)GSقH~eG,,0>+!)SDr_qFqx?bfA7[moRIly魍Lm}v&p }~G442N⳺A! 
$fwFBUwUlEX%t:0,<܎%6xy@ۓ4_~ں6w |7"mBzbS+j/q3"%_SbxAi>`f,&RJ/~kv,q!Ƴkɷ>!= !p[UsiN zvIt:Ws8rR/ elM9؂0Q lrGm .sry,fzi%W2鉓;aX"?ۓ^XA_]SS90Rf"v{TM'$0p |#tQbJ Z.wV*瞇 ȗ&0a™"4K=`YWH"5DFo%t mb)I~Pڣ XqRE:64VlsmH1]>Cg@ *hlU!f/}#pv-Ej}AlSIO\n4}o*FᔒˈV&8,iZ^1Cxf/bt]SX~D0ifuXYdHkmoyA = hNItCKL[R L+ )'"$ 5tmE큝ҼccCKHzz߈8{A#KUϮ:uR[6|t_UF\NIjwxSM BdǷL7n%a=1A[NZZyH=DKou'2*_tƼo!`$]@|D^>9S_ 8)?TH5FgRmᕚ|gm~ͭ:v<  5;u_NL@8 -A-ymH< SI3H,,:qqԂ`H2G?%v 7;Rcq2 IbE~g~} Ow*X}3>FsŒPV^@{/N<z_fngrEoox@{^h a0)qH]H[J v8a]y1(R_?ѕSύIL"*20 J(rb'DY=(^9xpm"@,ʊSV䧇"x*]$`4R^>[đs=5vffgP, @8$2;6l{NFR_MqV9Sň5Dl^nȷޟ7H{PKD5R(`n&2gNCCܴU>p[ 8!"h.߁o7t=\Xi+؞@Q\Zbޜy,1I?` l\L͠bk+3nTO[>smrU Qzݵ=H)G~^,Gy\d.[SՍpՄcӥ܅6QDIן6=gӂ!D O6l?wS>mWn$m0Ɵn.8#`64b\,T )WHF]` dj>£ȫ5ט|Diڙykd;o:$Ef/_PYǿCLp|Su/JS*7D)6$鄈CKH𫏗& ד=D@#Ku` cp;Os>(cSN snTFQ ߾g%ː3>woUq{؋7D vkPۇYO gX#~~6YWr;6vqU9! $1'_n, ![[ٵط,`&<uz/@sgDw,f7UÛ>(wbBÌTJ&4nI9jE69u1>mׂSog^XQ @hf\]RD^|R-oJ!={c)-c'7Eu5QmB%|,7pTY|'1 .8,Ph{"k6{ X%)2颽"@أ$&y@2Y3q>$MÐlL6q=F-jhL̂; #eƚ2ޙFQ7,\K_x/_M$Xs(3ۢi觗Ybԗ#$&^}d`vಀnH1)ӞA 5 #!jYD+HHV#ku&L#>=+ɔLUɳV6c^Z(E#oJOhuGh- k`a(uN=[^61B%'XͶnh'°ҺVL۬BO<"@ <q/ E Хh񾢑 Ş JjN:c1ЙG3DTV:7yT2CMIJD^ 5ڹx)0zc&SA%Z3<~2qE\No&@bڬ|+@MGouN”Pc_v}Ngme#"(E+ I%i\]X@H SQ->2-|/R8iy.V98{F(2QJl2) V{}$~< @/6?#Ў(YI|go "FS['?(c6~gd W8dIh)R'[]O!a]IoBˎ'g-~a\+GBu 8psrtf< W{ȷ(SoHQSW;-p1ִ"iA3ɋQvbT(5A( ޥa @ [~F= ʪu֋-t* > H%;"i'|>n,%/K"A5{t8di$GWvY)JQ2僙Z{ ONt4 R׹=E8)j*jzn0^QNo |%'A&jmW1OtH?gEpwͱ1"Q/X#X)0fYZ=1!'ε5 o?D&%]z*:}2bu_V3ꌂ΀7sgՠoh]]-9 P)k.|R0[ qxVڻZlgDGP U s7t~PԃXdHq:SYF?r 0 {Mcڬcl[g`EEQfO!xvAѷGq19< lUpw+µ6JQQ,4Yy0Xv{O"~|Qj%]Jl!?%ר,-" dѷ袹6@NX6=jNEw} TŽNlHӬMׂ9^"^ o^UL ,{#CI)ֻ Q+YQ qR YrO]l]رػOmm5OZ8Ɋ@񿯙v(KH]X: #$77>A_fI}휀:jx{ʓqo&MGW,(tAU2;B)}~ D鎘S+X5k 􂓤p[!Cc']g6OclvFՠVvH"[OFG2VX=ҲxA<6͒78+GkOb]]cSW"hAnaŒg[9ux,IJ#y_a¡oetzCk 3boy noU٬.ß˖8Vwx"KJ^5jɁcG)~HC>'n.QYX/ &nӄ=oAtg)N.9?u#|NB1^]kW,̼DB/qۤ1`O) 4I9_h[҂EV6 ǃ :̒dAĔk%vJ~8uKv(ނo-;*!m.>\8N켴≄Ȉ-;HǹST:“mÂ]uATz'?-1[R3e/sO(<'e[C]ZYghjTs !NIYP-y;R'Tڞ9 ^;ꕌTnAjya#g{~*`p=< nH ϽX^iaoU:?勢l Df.wGLl8 :I 3\[ߣ,FXvAq ӵ!ivuV8[YmjZ#?M'̮G 
_EʊlӇKcmJTML]w*[6kuAJ8E'7S68TE ez%(%^,{I76Na[O5 bH:4$XƯ1 oZY`^nu(/ V zQD@,/c"rJ SDڱl8Ui7Q7dr^q1b_L3S5j%jf% V!}TMaXnIP)‹U UCo-BΙta @+x} /O=a,p4En86"vKZRoQG6gWU*"]02р3ig\Hv@,CwhU%ox`jJzv +m MC>}p'!:樐Z.&ySI*?C/w<ڵ_ymc\1 &|0T/!;>QWID|cXF d6:{-2-0t=*k|Ԣ!7)Ca#<݉o]{ 1d=t 0N\6[sV0ȯ"*#Ef<2GȮ5t(7mg&KX+PS^OB^f E!Uwlvl7lHFIjZyLSmmhjNꪯ08Ye$uv4eu-HH3+NX!=}ۙ*Un KQ!VnSMuCCL; .>->ghfu`ՃSbPZ<8*E+d;!:U4!T{%[?q&xg;!wkorfDM mn U0V#(!+RDDqG2zp/z+CD4>zU?y t(on07q)AF[fvZW`/jMP5 =&TmAG~ǁ-T+Y sODf#RGo^WJLjFQJvqktM_9Wl=jVl|s%Yc]@RD+%WE. 籜GlToB7v|^ i9ost}}~ ioel5./BZ:!2UVAXMH ]OIE9ײGgf|,TWDzfϕ֗NH`u0ź!aP&%95%zy8W8 ݼ4 |~%_(aۨOU/'iCEºOGgl.h"RvjdڬǃRHsT>a)j%wXT4?곏^za؋R445M:^Fǿ;`lTQ9 4Q{0|>!ztO f}{5An#l%$cg|%V"uZrǸW)‹6}fϰhXG4Br$8+[؜^(*m#758B>xvdDMK(V1ҷ/6Q['}%w̸AZ#L&͡7wf)VW.L3`cӯ52[[ʕe@,+8c5ƭWYfMV>~r#bd3Ɖ!ۡϚXR@zH]\đ Kn)Hf❀byLy06P492#gZ@Lj n2>^[\"sZ o7*J•/߲]4H?yTZS!(D&0A*p!,-g5d0NWy_"FW Dp:1@!p|==z ZХtͻpfBAsֺ~yZKQd4ekq|kPA$ґj9W$=Rh 3+ Zu`_>Z/X/|!޼gG]%P&J;C֌BS :{n cIKL 5 lf<" S+x61/2Tfbrf3x$$uD~P\Y~^;llGz?Db9\_Z]^zj;xt(QvA&W)#CZ ȿsWJ\GlOji]rdnJɲ|;"휳-U,m5 Hʼ\D/=~xCW|f6+^:SsSkGP>FaB|ۜ'4KI2-">xӓ\ ^{fz x8), ̊orh# F GV5bITҢ9gn,`W㯰%hўOfqselUb, عH}LJtyZCAsȟUI/qhf!M4=)x9& #44^J7BZ[I͟itnC**+Iegw۩;Zɳc3|VdJɱޑ5ԻPnһpgMaYEȠ$$Ïqi\l%Vcf *NY.CDmמTLj@ܧְix̹)DL dUiYHjZLQ e l:3M<+-8=wև-?x%o!qZNQ!k 0@JdJyMo rT7&NiO3 q%S=Jy)x;vnUw%xDK/LP*%d\*42l=ɓηUe)Ѝ^<խȵ(eϋGbi^(3OmªtyKp_,Zկ 7h.w(n7!uzkd`'PQ9426sd$S>id:_$D>"^#+l .E3AYP,GvjGZKp*i65[6^Ko)q:Q>>9 D/@8'Χ#/Z,6{X.z_Yrx#QdZy!lzUEyB(YZX/q8ٌ2b6p,T57aᲺqQ"(MI깎N'M svEH)[$|uA;|k\ ua޹mtC݂K"ߨNb9ٔf$` )` bMvMx KOE_)˜z;"&bkdYa^C“}QJтo'oLڵ [ F_02 Fx,Nji|6WJ:"Mmvov f?Iv/3vI ߑUw}JϤM-F]}V.ȏ5Kslg>*Ɗ)JIq b߸V|NdkFaX gQN|E^*=ESS8h۴3 yY*:Hp}&-C f3FlˍM*) +->NMfC$XKTy\6CI>__tɫou,Z2xꕧ6IJ~OFy_hLX%{ƧD]G*F23ahY;ÈR]\Y ~~-^|stu5BKqaԩUm̱e{MB&舞| tcQ'".@2Z٭qd6r,,TP`΋9r4j4b,vlBvUYHB.F#J#@<Ng:aL_/ĉK/ Acu;J^8HTUO[i7K>.ocb|i: [om`ӷ'~~&4r_?V>Y Wg2+s7FBZv7y94M]:v ']QSTI܇5eC.S9VƆj>ysIIh]:lx;vwotuר}Ս!fǻxiyսCMM%yۋB ZO et[FT_:NzFt{Y83)&#́Z ~iJޞߺ>)Cݚ"cf#B];HQȔ;[؉]H*\U榓sa}7ŷv_KeK~_՛KNg j~\ A/pT\P+kCyCyJ"I(y 
ON|:c-˛boKf/6K~G[IZb͆G~q\~Q-I#j5-qBq&yp5I}sU~TuN(vCzT,ɲRǁYsH5k+5 i^cN@fw?70Jpks2 Y'ExXy2s\ C'D8 9vϐH ^j1EBݕmH(S$TU08G[KqNn0m8'"jZ5TpCHJTnčXZxҲvxY2l:'7 B,RXew_y^I4cR+ ׼Ls -C Jl TR MG槌%4aP.9E!FȊon\s`l1vlnd(-z i"ڟI̚vER=BF ߀81{p>7e㍟W9 +Ӻ -EY7R–n8Hih6> stream xڍvTk.t7 HJ 4H0 00Ctw"HA4*R"!9k֚yzzr(pu4 #*r$!  0H_nR^3'_8B0X.B` ,-qH?@< F]Q$UAy 06yXNNFw: @@!(@qb;B!H E1~*!ɋB\=E 0{=0€g3QR^ ov@<ցD@(Ol c-@ :, _ٿ !P!P( Cb|1 =|7c'J_yB=nOQO׊b`OY SAQO_"LSMCP]ܳd4@UpԈ]R!x$N}J}#qɳG.id1*/BLY* Jj~o^56Y£F7\=S.4@b Io$ыY@++|ŝf3cU*d'7~m>)T̬4&fFcG=K :2~aI =rqYDc(ʕdfg霩v@uӭ`bȧ܋Xabimi$A8Jޭ[z~x_g~uzqfkiN)S*ⅰ;*r5U]n\žttj]&G^ͧik#K~Ojn+9C{}Q6Fo+%_U-?8՜lW &rIHounn\ݾ{3 wrk/T\W?6G[֨DusʭVU:۪O$Mcrוǐ0=Kmch> +y`fCnӪO}@ާگ\N˘Í29IB@f`Ƌ8LI 7kɢNHYsKN:We(Lł t-t\?qMe!鹝O]'{^é>$r!Z‡u`}qDr0TLb7]x 瞪RE~Oe7u7uJI~_+:s8ICv5/iwC2k%fo@zbAfi#4VU.i ~;s16v&K }͠wt V }) E!*VuE} .r  uBLDV ̕a-u9-Ond:zHjWc":gJ\`ܼC_5욘≡=3*tC !(] iIF}cu R%5/~oaxZv,&?0pD\;P_/ik_{R*PZRε[;i{'֕o?~ ʎMMh.p_:( Lx/WH+=o{$SO+Rr5:ppJ>0 5 v1m!ZX1p0k{|ԝ؉]BP뺪R(U~?vw&ơ9]iC/Xd5%nݻ= [^O' fb\D o:~΄]hVgf l9]hHsi'}cł$l>_@ޒذQ(屿Ms@~<4ئ=+t[gsa7\6dyƟ)jpXY*̊8L.m꼵,-^xA-ʣNݐCx\_teRU u3Ov"8A&`SPIBn^!cvO=P&7RPE":+gYG7WJ?ɻY6'[qL9-RYVÿdIlNhBB>+;fG/wL/tt[*6w&;֥"*&{Vr0/ yw$)îJ[w\oY tZ}̄U?H=\dMGJ[60 =qT0u"*F(6\m\o"?!uǵkoSOmOLjQ+HrV]vG(5oUр@!f7 LO}-VrMϨi;:d:=ph  z;ooǏo #Djp$|%;zc4Z*pvaPuD$əs} xI@x6DR-1Y*SMޱݐ$pT9A` ^EU lI>)u)d)ЦAG_X k^@pD͙ro(@H1dGS$KhP=OB`sw+$SvF" y# ]<[cOܰ#MYQ6 md;>c RWY9o:mOs~Ɛ,pGӼ_a4:y卋YCL7ARnEN8.un㼡t,Zd?cY5KIW4Ҝ?Y9=G'7=:W=߲ncKH]nHؔzi@i~5+|8P*;?D#O73}CZo0:m +c+L ƺ 6-O'/ۃyu@mGzeWlkp?QywL{/&)8zp_sHL>5Vcn*9h<17h[Va--\7sǒ5m}ƤJ2%J-/v7p TCgsCj~PO]w >TָH!W&C>I4 $٬2BUޖ~sinlӯy X@)&WF BdWFP;Zk,/4Ͼ Y?o{RV Sn$Z}dhea=r+_;k7TތǼ 6aޒq):?w6]!2tPdv&g*:r+`.,.=RK1 Dp?lV%RY-jtoڎЀ5oaj˥H q ؇II9mL#Cf9-3~סA6IzC<:Wv'W9t7;7v.鸈k\ĥ-N"bIL>ᅙM|iV,㋞8taFB!mzR ٳ<8՘j򆏝w lZT $rqOZ`Zö́r_{6k2VJҽnbt R358Iև# o>iS"ZzʸW (4џ0* vґL Z=([Wxw[Jz@eX؞QF2t 2z1NQt-)3e;\ؖ%E;Rf&ƽb\ ٫s{z[+6E1ayƱ~gY3/DWOQir&S{QrTDؙmt\=v5_ P.5ALa5' ~{ˏlCj9ԷiHO~BT<~Fw]r>QfhZkUВ:~p&2KsR3w [/ nΕ!d Z3(mUr)#䬷.wH'!YLX5 DL-<יl1+~BH 
)*HOLNqLK[&JN16 Md)'t6;29D@Y GA1Ŝlwf Esq}yUf; `DH6k~Wgظ8Lج(y`꺏VRv2oaB$\ٷ J$6 P|3#MG3Y7e崙C6:\rl1ZPqdW +5XqɭK?0|'IPmVE"J3=~F_?20RLQySԂI!D?j!6,hOd9{ynVB8aSg:]#cyƐ, r@ckPjf>qio | fs3'Ose1 1Tg;#"AiSFnu3#?;!6}I*cWrEc}fD;jѕY;o¨E+!׬f2j`CnWP$cs8Ylb՛endstream endobj 638 0 obj << /Filter /FlateDecode /Length 3107 >> stream xڽZYo~`/>rƮȅC{Hb!-ySWtsZǮ@3}UUW}u4}7oYT˫Y[V-g߼)h.j] А/L3/tyVYT ,#}_x.hqҵwXeV5&P7[^*ywW0}(7# Xa5pd)dyQPS.nmZh8[bryEp—Gyr'[ʈ6@r%o#h-I3!|HF_A U[ "ZqK!=I{lsݑs;$a/nq˞(*;s*9;? B2Ofup_+?c&ܮX'\Hi@zc`'@I[7>Ⱥ0$wL/nI7<'Āݩ vdih۲VPp[ǁfQ bD6~K DʖY=r jOW#gn!UBն{n=d)l^,|Վ2Ppkr[&ray4Hn5Dꚢ%JWh${=s VZA[uc;<)χveLQd4ӧٴ*_5*vܡO4 ``0d`@wD ?آF-Dh($36"B*k#^[KKu;3qU?ZQp}Ѝ^h8[4)Ô%mg̎A}L2s9>(#~1Mt+Tc̃Wf~uxl* 7-=[@9Km46!jeEM4ϺrFt!Ánu@) Fd:D\Y-(X͸L@|8똟k|LIǕiwSkLpLkJ\/]*? IƢlۤc4lK:eKD$?!J=2iHa\U {܃6~Ťl"o3.e$5ɵmr̿)5vl. 8+"<u9!vL*$1R Z;>nI]pxRaq>"݁WؾR\a)%4A ^ v<%&+"_ tZSj*Y^RP ʦԴg J !㢫ᡀUW`ݯ*FRaG]a5|cW/ @˶7R齍[s5O/FeEo]hv{لjA!^s9Tde[a$DxBQquG?kSnT,>sFk_pb/nc'?~Ij„p D`xyv/!NjQ+SV?]﷜kueH>|2y]ld~0S4"R+[?X=l!:-`rUg˩59'P=E񭮩LV gWeK6~DKu1nƫߕmtz [(69;y/c,C` !M cG>QGc] uЅW o{[i}a:Z)a6u/^)%Z C9Ն^2 )]rSQH[0@qB2ŐؒPd5 k$BI9h0|ߟX_’k9XXB_mDJ#JҎ9zQq*Ug7m}~ Mڢyl٧ﲦz\BpFcLh\N%^V/)]1f0є} j5_ՈW.*%\dmK<ؗ@* #-I W* zَfgVhJXb!=aa[Ct`!Ϙg ۖq ί΋&o=b qbLn*`DWsS֘/ ֜i8y0K^ġ &dgaIFDG6 w/Tendstream endobj 639 0 obj << /Filter /FlateDecode /Length 3550 >> stream xr_!@!I$M;dh[Y]\Z@dgIl pppdY6󫋏tvV4zL9xJy1Z~5;:}w|-@s~}x1=`C3 8"{~_vko+ nθ@/.E 9l[*^lz=mpt:q˅)-bX%uG]ʟ .5Hv':k,@$VHbOҵ-V:/@ _eEreLTu;¶P j˳~FU5?jZ7BeieXHBe٣!c6S8zB)[OTs~?3nWλN 짅ά+ z^Lg,*+ݐ( $K[uǿf-yTɮ%!ndfïw \\&î:ldeʾ-HFZе*sīc ^G9^ȳu-?S,by!4@VAQ6yP!¹ԿS\xSvlU?c+U=^\d?0g ZR*jTPv  pJRMR&#pmv 졽\RIz3 ?ܶba`K|$ >?x`}d/M㕲#ټ-m?12U5DD~Z?㱡5_7a0/5EubF&h V&qMVy DҀ |dS:і݅͟7{d΂ظ.h$nm#]ֺwa2sR@cTJ22Qe BAjUN"[4.Vҍ,OSd;Å\;)plG9H8.0pN^ΙL݇GƥuJa0tc7Sw!ci סh.3=W\YFe~0L 9gep>?)R趱H Di !<87odtC܆r#Q͊Ku4szpk -$n<L qTgMcon"f{,pCA4_QEpaM-:L*5@,N?5ɓR7<?$,Gb̭HuF_V+r!= th U Xk: ;NNOV872dI;ټ wZ ]Yh-j /;0"$g*橄|Kɻ5M>hŁ}M'q$&t^!Q۬q&Bp٪f/Ɖk a @v3mK`p%a +&(IE{K$#b=5W$5I͐cd 
xwW|G/}?]m^Ϳ1eӉ=y3K:=LEډH''+JD<):ykL k~pZ{Xp&ۀ,=D\v2kdh\4$|?9lR2BI;q=Ot~ٻ%[Gጱox+R3(iponad `b j+Cf]x<>?OH?C_D ʀQT/)0 'ifh_T&KIsl[4| Q443iB7$rpY >O% (N3`$:z7pK=T.!N{fžY=j `f7T%Ha>Ni95/ÿbh\Zk;B/z!iM0mдz~ÊՇąB/têN,ÝgN#دAO)֚;7Aև)BplF>%T%,K"qq5KZ5KX> (zŹQ ]Ӕd$OhSd  D"G,g)[u+>pOB0J&ۺD uVN۹>~ubx>uN1R6lr{)RM[yQ͏D=+N8Kg\p-exŞH[/+O?b򝗗`=@"EGI_" =>3EQD40. FjZѷ84|+1!wٓqhi!y-I/6Zx/ZFDk)RjHslN)atA*U ]vȨ- TѽFV0;]+! d1yl.xY…ud*:k̰Ӟ,[AZ >]W@RA*u/'E !˪ɼn4;* q7֡E[&NYLo~Y;&}je{d$EHTLi3/] Q2>O/gjȵt`NTod&0CB8SU8[|"<"ᣓP3 k/c¿R>:#"8!$斍]/чPT)C) t(%س]؛d&2IOΣl<\cҀ# 뒮nY%%/QBp? 2NURRas> stream xZݏ۸_a^@$-p.pH@z}Ǟ? 9(-`șpf|ͫߕ#L.dBH=>ίyﴎ`\@v|%q}5-W}rJ@j"v=yE]b9/|׿E %+E^׎- l]=l{^Ml^-IRB.AG{ϫ0hDrdiXYxHkߡ᷃߾E`X j9,찉bpM zp?2.7 QQ{qoUК 0@zh{zZFr$V,U1-?Ϭ6qəz7Drʙ ,&Bq5JS|a-IYލ7%N0k2,3|4c @.,w`%x]zj/;lSAwr$N図{ϳZ:zG-KHeYJG+m_>TTBv.:Z~&(Vy[AÂ$ `pT, cv@n=];㕻ܺ*;–j9/vt-*l^fNXS%]S(!j{vF#z3BTY֕UN{'9sF:sZ Vh^vu=\r$g޲ ]*k$#vSʩؽlI8H{ Lz׺wȑt&-ݺU~hSS+uD]AX pY i0:kVL =crԡ2`N^OaÎ*Ðf "^sGc+| ;HewJdh)~v ȍ1!\ V8 |Kd !$~BL@RCp8wui87WK/oČD2H.NNjgU+YٙYL_1Hfao]}D Ba[84KnF\0d6pHU$L[r&gS_F&Y)Y2e_ /B?}vtJ $M97d>ǜIHE & I ˘lvҳD(TSk1p3H Kg|l(R3YbbZЂ0uw_ &hqh{F `FOo ? F|WM)O} ~ V~ߔpmBiD"'ÐyuWYrYm( R QSz$ >_=]= X~݂O>$)jiݖaGaHv]}(ܑ3טyۘZE}\3֥{( Do;b2.1l>]逕|/Jμd,nWa:kLE[M*3LV %t WӕkK*M)ٿ3h4۴?ek+7kF* :܎Fy;4>jxV&x+Py[+O4jsWtڇo` vpI^%qK*Q8(d.)&lJ9TVy݇C{?HI2V8ؐ! GKF,D*" M2S!RCD}*5q[Ib_«NWK.s4G{iG;^I 6G,۪MB b82rB9pU.aN~! hٹdyv$hv]ĤRJs[.?_ |"D0ׄى͏GT42w?q߸Rg9&bp{թoH۵oOn5#H> stream xڭ[[6~P˪k,7ޒd7٬kr*[V,=b+=7 NeA\Z-njy-ڢLxsЭ*˺hmxsiB/7Ow#Z[ O;7~n=6v'➟a ?.~~@bD5е{PW_Bº`畩뢪be]-.Vmi Muc 㶇X0]YX[G_I+rj@Kp zvb+v?wdn?2/y@R2x{@3 Ǒ{W=w l#/aVZm)g?!=4ػZv7,p #7\ҫ&! $$ɎX_ K $?5_y?γ]]ba# m a+c!olт?êpp# =1ؽp)i34G8Gz@=sas@&G㻋0N:"-i dΏ"-,,c)TDtl{(\6[+X2k# ʘtGR):K^Um o x`Ju~8yC|G|hױQąڅ\c*P#{e2!dC؛xn8e9g0S+fЉd"z*d$xNĂHA`^@ssS5rxwmx < ^Wiu7%xS@hcsr nx51ʚ O2*927':$!|&Nl;h(Zp##kj`}/JJxv{5,O RhjyST߾U\fqDx·b^xXЯ]c}Fm@?Lyj22zCMt2ދ҆Uz{ th]tHd 2FAͤXlc GF ܁N7 .:_i< Y`pQL|-Qɟ.9,6&Ⴙ&$;ϗģ=IO! 
on|)3 Ï+A?*vkwÉ 1wXky@]z{ 8a=c#KeUpl>)Pנ荆2z=d$ &-7Dպh Y%KTր'G]8Z<0]m*"P2`C~4V9UԼ8 T9e6AMlͷQ 4.՜.]՜ 6mjoB>EP9"7wo#-v*m=BщQXXM H۲h`2 T ),u0HZ!R3+_ u JLTݎ_\-~!򔱣rP2|¶OH;@pȁN* <^unu)/:'0ma3DT]2_ EcQ2%k/Y*kؼ$Ww!2H&X0U:\aP99u3iirn-޴䶌E1k0E(ALG  'Î܂eL曰KTa2,! 0unAMhcܮi+"X.mIX[x$Ȃb (z Ɵ"xVm;HpýC~3y5;vv'Heˎ/3:+3cx'g9hmLY|lnx'UaM)Rۀuv[}fy 4rc(na2cIǗ9j߇Hٕ i/QnV[w!^ DKu4l |) 8c Nkh}}αθF9ޘ`^a$y 4EY 8b٘љ63Mm<z%#w_ǟ3! :Qch1D@H֍,VM qbu⽭A8`Nԁda?&k,=bf-]|`>[oO$.}j? CVS]`ĝXza~{7+iz>1@@,[eUƸTE3 amli7/BS,J/LSۘBr޿gpBH7wmI1~0N];oc]Fz쁧"'<@Y(&zՀ:9+#sњ~N*b>~u b烬 EU1k&9C.J3QWNHG/KEMPY}AMd?fiS>3L5Y u =liL`6wT4SНdΎ}"l,TFZEjvuW)uX;t4p{fj,u;SRA%;^*973@܍Z2 ͬDŽPR4N$3 [?, }n Hףy@?lVS׿Ĺcp VQ@K 9B_?xva^0 Y8 fi8ɭ_Z +X0~uI<Fj|`\!7Z\>+ւIӤiD^;0/rc4Nm g"o}_ kw\h5ġ[AI͵$;\y?030`Aň!,Lu^,<d*m&zo"oXHse]$p?aMCPsr cxd ;9E|nvW3 nn}HUL$g[oC\xmQgVS?n5t%VN{e.8.6c bvvYq4ȵgS0XX/3i0TWR:2QF2f"u"uэqvptl+ѫsR:&UROp*G,n'Iammxȷcռ2>KpSv!Oʉ]8^:S\b 1ÈXkI͹ys$[hJ?>vv ku۹ KCEM[Âb$QThU˻+DuŚU Z9  g*)?8jIendstream endobj 642 0 obj << /Filter /FlateDecode /Length 4828 >> stream xKJӫTB{z* %I#.D$!d3{o"L P(Bq8? X 7O`>v.mvAuah'~q W:8P!18{6^\ Hj\נAps`QOqE5؆2CI*GQO,W6hdQrVH$~T ωօԼgHP=!И.r0Ŝl5` ~4sSUޔ=0^ink&xGJw+@@=Ҩ(, HP=F|./ =Y,x` u 5iR kZ!|%%'qR1$i{ͼG}lF!J(褽 fo.F apY!gDLr<0ڧƛ}V RʷmZnl[%=s>D?uq~_"m(ғɽ.2lޮ&TN't% OG\rgOH'쒗p$ݮmin6~pzބٲ̡ V#GQlr֠dYHd-غNHE 0>rSE[Pгr5Y*MMS&&lDZG(` d#Q19Թ9^H_gb[yz^x=3 Jfyc5dNй ĭ=8ZEaa?MT`ϋ` ~B78-"Y%Z &)ւ9h~=8ڢњq["2E61\lk7xMfj U#~#Ma~4j/yWȴ[:, }V +;\GEב]1F; +}-|5l8!3+T9)bIHGSv{a^=e4(] %)RTIq*ʐAN6D% uǽp0aoeQz"Zdv<wVcv>$06~ƒ { yU/}H;?#%Dϋ`g 3 ?ߤ"eHP TRQӍ34_)]@(VYf6_CQi"ٽF_ߔ4!xNmYzNJStH!cBBDj\ߑL O^{j}LvGkՁPq KϿKQͽe(`ҹ4qKe~_Qn=(@\z#tOEM/y`GN+zY Q7vڄD*@'~nfO^WK6^ qpuBK< FGN(Wm.*h@p}}ýGM:`Zzz~fkC0\$/EօƴxrL㼧1 ÈS ) eM keMJ%G4DtloEq1p$A[UlBj@%zaZJ@v >P;w~ɠN‚ֺ#J,!bt) 3J!4C̜50!hBh 95$$7jkH5.@ȣ BVC_SbvΪ?&FsPa_R$ykbV͇A'̂,]VCxmb % (L9}.lM(| Ӣ$y=Iޢva6!8,q9AYor+K[g,Ch0{A ]}<69S:XF=:%.$FԄ- '=Bz 1} X> 4z h4svJ WY;ĩFYw@82a60[5X`>xXwc& ĀzL) %PF-WcZ@NZtOF^͆YZʐǜUA.YnIUsDn qG׭K[IM7x#xaZ}yH 6m"q˛ &hVQL%eɡta-4$5nInA/ﺘ&惂k:Fch>iGlvð.qy! 
4U;=uvʉh YWu)Jӗ96* s P-u O 65IRkt}x>SsNǧ;w5ьs'"uQB% FA.HB ˸!Zp%XBՈ RRXH"7%URB DRO@eap*D}xCU.g` $+c):XSL_ڐ&C.5s\ݯL\Cb'G鰱@|:] B筰vdl93MM_pOCVgkL&MM ̊J.*3Y h'1$ tL~|bi(^O?&ƌfFz*SEhw >KQi 9}Q:.=nC_)qa\dEA IQ3r=tEԹ䪝Uihf.4[ dvVkձ3E{%zS/^|;~$stHٱyG1*cCDep]Ι# dakVM⛫"*ˣVXq>%/vKqR"ly\냱"JCoȘ`+%8jJnn-UUE.n&tCO1هݱ0!eksIN%ejGmZܮh =zkU7?e$t-bIxVH'#gˬτm]«Xdc07 PγH+ϻxG+9g/k >ӂ}? aR58Qw|8pDι&j`d0uA}Zm|^^.s#fA(! $Vn? C$$ P4C?L H \d*O{.i잜΋ReE _ˤHrBRkޤogtÙR_5}"e)ToqЬ:0J9xyFo{ʋhSq6/p\CsPN +*mSxq=]dc|l' {{.0&2Mv3L!6o߽[[KNECaTO[F50 ǦսyHr`xB4ͬ@ x!/45r,cv4𙁅aˀ,Bͅ !(HT(Δ[R4J;/XT8Ւ3+XwwUx5{;zendstream endobj 643 0 obj << /Filter /FlateDecode /Length1 1676 /Length2 7522 /Length3 0 /Length 8587 >> stream xڍtTj6)tH- ұtHwwK²Ē"H " Ғ҂ ?zgwgyaa疵Y`P7O h 㲰@,F`;C!l Qy88PD'(n7_4~!a!q|w&[^( q+`ꩀ qmA;L?>-mG,?O_ a6[3.m8M.7w{D"ވȿ`K{Tz63S0Pǚ*Y/a,kܾ3 B_')D+l2 ׾ۍX^6]]=}7ք;=J3R-@f( M%CP'룲OSkxW%qAE,֙TLt$'O>d2=߉(5[?Y(3wo~HmFE~Li쑯F x;[ei kV! m|9O\\ʎ/y {_Y"(W6q:-z# S_f2~@=E!_s9̻ ^ tgR?ʵ[Q x^v|DQCQ%Tt2cG{§%H׫E3gb 8ʍe Nߥ[gg,bg*cT,=] P >LV<; <P3igٷމr`mU|58NdB@rEAc^ˏMp)\ޛ,&-*nEI!}C5X6e Âu M]K`fbЩৈ|o]t2 j5qqJJh>AR3(L3ZpH;p:ql&l\h9\Gٙ$SqK>zc RJU}NUV!=cV{; da-KH=Xo/v5ړ{gA]vmշ9k/R't-6Ň%-'w^j# _ĔCvd\D_S;یH&df~NFZqHJށu_G aq ~x&)Yȸ ?pvI2GomS\l2:ot)U/#ae2@W-aCz U\vn[zjGH,ĘD h1UZ{_vT&`~j`jaRmxn.<|?-{77☽I*ۓ$M_- [h}6}sDSm6 z1 ֮)={r3O>j /ߘ~99j KrFRԭ838ǙxȲgVŔ ^aN?~F%ådGz7sZpa!ToV7n()>Zfm/ k/$>{zrt2hMu zϬXw vjIjŪ%@D3v԰zH2FDi"rNsDCڔ6l7 w !0ci4ӦG_R5vKO^} ȕY\M%$e>,B/*q'r.9X* ac &W~|٥{O NYmw}T;Cd웦:cPE>&Em)ڢC* )t Z1bkx2__?){ŝװ_o$OEf]:M*a-$ם(+<..vA30HƄZ jIB: 8n4|G}s{8kْ[a'gkģV2U-{JKXB8QPпQ3WT6,ǷϪ6'dK ^NÊey̖`&9+FFOZZ;(KM5;wsHc?nsX<_$cz9θnNLD%1Hs '=˜N $p->)qT 5VR{_C6avT0d*Bޗ} P6&E닥(R北ʢo̮+j,C y!]8B}PH%h6^Y[0BW5fU~(eF\xzA.VXfƿʷ}Opb.@3?%YS*[g#*L\q 1G*,^bqlIV e奺`$ݞldAgȤ|5 h1*iHX,,r92įҊ~`D"y5= gc]^AB&A X~`E2w~g0ߧ1,a+gnR@@g^x^DCe?`#,;cU8ZC^ޏ5rx (?Qc^Kf67^P|h|UT?g2q{{gy0'%"fR1v"\,qZo%ğ7)x~ٺ NZ4s@(V@BiǚFZ`%l&̞.#8GY[#+d l'ntLe61>zG~-swb;v=`4emug8yOǓ&wo;-2*ns!ULo %~}^uBm ?jO"&B ifMײl. 
ίKJ1,'kЭ`)vvyy981g+ixd64ӂR+⃝ϔјiοL|'*%ii*B:~ {`4֝ >.9G]o }ѤSmG{mU tS7}0 PA,;4}HiECI3 }E:{'0;ub07JD""CLya/8Am'˹oZ6)< dl9<Pr)ڮEO1j=O:x4f-ٔѸ،LۯegY $Vf!KzV);*˙_XzV%AN ˋDĎ~ˍ"&-{n$Qjw7}ϙbia`-<8D?ZeP I=f7s&;jrFֽHn,࢛[mG|9l=ʾgʿQo%>d~F.xw{g#h cuDdC ')<6ΦQTq9rlH^O"5;7=Y'펥q.ƆkC0A,qG̜LRzz&,mYy Q\**" r "MlGZ ab){,bǬ Cz9BG!Js\1ki'J#JA/ڹu Gk|Ƅܳy )B˪/.s96ˍ2N0-VZT,tpkWL@z⢢SD|!]^X8<1bQ~ ;SF)pz.32x#j @ #\!b5AyG8t`ڴsvĩjH@1I7Oinf+o6ƚT-U%==od_rK^ے4eYrmA 1[>RK9`DkXwg<@i ɺDdo-LwM5#X}oLUy{iKoozsoĜ_4U\D5OtL&ߢؙOy ?RR>k7":ب7Fʼn*D/ z3 p/=ګˈYX4;=όhB&6s"",)@Z  D亊wՍx֝h%CB-hw vV5G6Ytf0Q(}K2c 0Ku 4, p)琪eDmLv$BtӻW* >pL(t:AQ^.^XciT<,ݶ:|.l,J6uc-(X,kԳ0CVtM8L򞵐UOfK}ӽҰ؛Q%=}kqR9SZ +(ck(Ty^&yAzg}b T4/WJnO@Dc^d*amCFE$QnqE'Jq(ld 0Lα8 ĻAY@ٴF7BHh,sps/JQ`ͱ OA:ii.jtצ^V 4%nqھLyշ$aVWKxFW*fusm*|XG9 CMԺ}POhGT[K]L1p$zg5F D=roz۾ހ&o;u6>reҥNY{HK&Q]-HPV0gpTUSI2oq H38 _hj߷~*dnIYQǚmye{Ow7s*GڴI4n,NL:6:g֐i`Oh(3Dt#VȤKb/,;{˒0[4k{>A}/_BţqNX6xP΄`]TS,,F))Nu"wo)j8җZo0 FF=HCg l3#ҌprYmV;4 ]d)}mC)'惁𾾋d4zvAjpOOc=죇ʿ叹G8bf)2{;Q S|:R;xNЁޜڷ2d:"x?9 wKA̳v>")ZD#ުaB,6f;.:Ҥg _%[kL}FMIEQ~1(o冚 xU/O'e/Da 򓉡-^mNTHUG~:24sJzBoQ"uuE DYd~K_ʷz~\N (t YN&54'?+<7.9XY+ZNcho?[5DYfcH#p۱i>K3aBҶ;?-`R\-L؛#$H{p[,D ԴxSavZ+fX0^R^SQ~8|3>Uҥ sHeT}G+  OWmN909zةzil@Zhp eمpːmPQ/kLh, gNh szk$ W.lR$rf˱0ݬ. 'i t hv^]sqd4uYÓfL9UvY.T/xp=_6Ls{ͅK4aWT[endstream endobj 644 0 obj << /Filter /FlateDecode /Length1 2230 /Length2 17658 /Length3 0 /Length 18999 >> stream xڌP ^8%hqwwwwŵ)Zҽ}3Nf߳YZorbE:!S1PdLD bb02322#Z:G@ttqBh.5r~7XLL܌fFF19rD\,Mri \dhin&T&..l&Fv9#g {F# BPZ8;s30:уheh C od 5zrӿ* 3gW#G ]`ciszwbg tgHv6-?037 `fi(;9L8\,m U@\H `?L-흝,mpdbv" [[ŸD-&}wgZہ\<YڙaŞA PJ?6"2s3 tL,$PuKGd0{4 x:Ύ_ޞTo04q-F >~L?>O)ARRDXQ?=_OῈ=_A/b0EQ4(ZE\QFE`dcoW=1&ElS?<wh~ ߋ/fO&z&,{l!{E6FƦ37DuyfF}/оWh7{-lL-`{ 4$toM卵,nw{矝on(OIz 4T&@7 <Ȅ'Ȫ>w+$/EoMf"}8ie ~&q}nya9 מ DwtDDqow೉$(`)ŸK #Q{LTQ>-&R`sHwwԖebpn+;|ݶ ܉ O z@HpA^E'v ػ$ qF&eQ_-@~-u9@4<1ܠJ]/e{uGLaadt@f)WaJ2ğxޗn)}6thlT)Ar |UM.f!kSvh lW(g5u8,:TKV4XX>XU!34 kyz\wu S,;DEGyl6ze?^014Exnyոmقztزm.DD9A8$_Wvn?UAęU>cM :X\O<^N 2yԧه !h~*I 1(4ϷQc{v`sm[~! 
bMB!Si8(!6Ch-7j  :Hˀ dR@hq0` J b9;H[I!*(”5JI[QKd䎳:&+@2dn&㮶, #/.Rc_^W  FzM0i*-оtfy|VC;nBh`M5WJ|%} 8UPzpJ̆ˏXޟ7m c,6-Ε]3fB?e. zqܟZB5 I86{jC8Tg4ݱy;Q{bղۃ7=gjElA{U+ئX%@S~c#KS)M&8{$Kz6!Bӧ[$)2&G7#Ϫ)"j֑G"%R{ tIzl6%fmna^-)Oo&Rr$ÝM*OBL,X.'ӿXSJ,y,jb4ɝR7GyWs`撰u4!2 ˙e k&E33^Y ' ڜ&knKjEBwbxӑedcS6yX*);4 !Yg[#ޱ~a}HhX}44U,9UF$bo|8!O xD!CPP"0qnpS 6mb,}|5CB5E -tC"݆v)f8q+,~Yzzb́ٸCjw;PGUIaW~۳SjV(hKP&_M$1$B34%~C#OmAfa3ջ.@Rq8:Y٪<խ^3 R׽[=G0@ 0QpYZzꂙ.: yǯjDfJn]:m(O9'}TJwqMJlw8Ʋ l8|J Xla jXMC"*rwss܇!b^"=<$!nh};ZM1E0UJ"knO\Uv8ĸ/I1GMdo6`z,q ~;)'P`Cp ؑo-B-Vp)N+nE XCn9e7 V~H GwTOAbVОLLなu/l2^~;^ΰEV¦G|K,=>:]Ms,/.yvjHrwZt·iG_=~";35~3؏m:"4N5!8(V3a; ɊNt E4 ȗTFe 3R/gXFW*rзHW ZL0ߣ3e/ֱxsXQ!"U0@'3=Q!&+=5wRͺ[[`u7OնkEBzgzhɔ𶳢:_{}./a4W^8md prX(rIOnw n5NԜBrڏ|7elbQdc&gc*BNa֑mb;\i ]AD#T=egSxԽjyg_&`ux <ֳtǬ䥥eH!r)CF)W'EW$I)#e.|jÍwnX]+%Q4X/8Ӈp̉mEb9M\af8|d[Ng&16 N3BԏNBfF'& /I@s^K O6ov_D a(B984wB"x V|gc]j,hDs=Σe#+hӷCEu@JNd{1.),1tܾ>>ۣR!ˑ$M*e8~7x)e2RvQI%a&6,b bn(a wWq^ܟ>}^W-h7 /%D5BCO6U67F>N"2kWbIIroUn$ua0i]DyR.2_Γ]&TmIBMkP%;rbe#m&3dw1i=ɲ!9+ a''߇L(__NTG{zp H#9)ؘ@OHβSua^?f>Qm8COvOQȅ(s/n\Xb?BZ3"=宏Bf[-g#].JxyS X `O[4XK##/AN:zlU{a!A}w3`]b-85GJ78L.N ~m +slN)1u{ӼCn\ %~`zj{e>AY%aY8PM$-%TB=PK( a$^L!}'k||.c(/ oQ<=հti.5OJV?s|{ӼjMdQnUr=| eB8fJh GmDtQSrTrƑMov$$LT?j!RқGP@ifGOk2Ȉ>hrQ8ī V54tODM+xJWַb .^l0DM| X,-+F/ظF7b(T(*5.FwoYuVP0 !dWp#sKp;@[7j̜SFtQx9>f=As*< zPZe||%Hߑ1(j5  2{"͙_dNM`jwV٘|ivaȇ* ^!|Hӕmdl:Kƀ.]%غi7QDja?zDNG_L=?]v:fqx%EPI~>WyP3=k"Nr~izPF (MP`BY\fܸṈ}+X iUQ[L ,{|pO3YgܰQ#bMɸG̓RmiT{B_'3B%S-M,rpl(V6ָ۾*GrESAbdʹ% Ȧ> |7E~g`p^ނGb:%B_q|81Sx@M'ګnp.6vIM1KeK[C57[0(&*$:(ǢF<VnJ ]gV!); 0^+g_fsM/ږ5d(DK iq-ryO,`pQp/2:-l)vKVxшvgPZ2Wy ӚED2o إ|W3H;*)n e-aOEA bw{;'*19τf: $AZ+ %cy؏g̼c0χHl6 ѣ6EsњNѪLdI[Wn2/zҮcVXq8\§ 'pP( b4HY;$v#娔U+ 46(T$AYsm u\Etdf̻^𵀌Fc!^%-b*`ƪ;9^&DVMQ`dtLm4 ֱ;AGb견< 1d!a$YNzV7 [BS_3` d<7[8ơZ VC1vv~?b7DT1|!:N%IxishK=*]lKia^_3"7J ;aȼwsIdVO%%#R?OwW=~Q18\%6`mϠ޷4rch_Թ,~^j ) Wg]clZ- 7D&vqŽo11e&\\4`v1koG)d^FDcItw[N\Ѻ޺{q ?KP +c/!8[Y [|8V"k̵Mq KYn|sϡcT[vՙGlT 87 iM&uQYJC}6Ѕa/4Zy'stJ5 |2'޼ȹ`U XsP_u`eig!/UcdgCny| 8-S">SN 
A%!{XiFm|B!oƦ-zQoO6Z^{yVQO۴7/3FLB`TEtRp$_̉ Da/9T&YvaBV}+ f;P ڏ @Ŧ=w/aN- ճPu6iqU\!誎dE$vHO d]_ Y<e&ST♆|8!̞ɚm~vJs`/]D։L_b*g.: `mr]s/2]SBCiJB,z m`"xTG^8VV `c =. pM ii@~!WwLV<1ß&4iU$HDk""CtoHg5txtZB@ӷ3#j(ހ,U4ւ>!qGYUӻ"ABIX߹eQla~'xf#8e|Us5[1_?ڙ4gZ(*_I G#̾Q{DO`>>`n;3z.pg4Ul,py ;ZC`A V+^lLR%yjSS<`KLaţ2W)Yjr*˳+m;C\&/yJ,&6ZpGnZg?mVK"c_Pp׆Wv9K >Zp`~N 8fΛO{YeM\_"V'lsۺs%y a0VF8Fh>]M ( T-wEf \u$SZh d ?k琲R,P)ͅ ylS l$tYyJӆKM9){O0,obA)$Z8vµ6uwY=2 JwבO&!ҸRzF L,7(qCC⨎Qc> ׽)!98}|h}?9NQ_#; v zɾq!J ?,PM'K<6;c<[ ֏JB5JV8K`Iڃe{Cs;sM_ʇ&!¬G֒FY1.;f:u`-8 90DQV)usLna8xy__onY1}a~bR9`'4n`} +lݓ=L+U^ \x@5w L4:H1$N<ဲ /ZoY<>ځ%k*Q>8>TPDUhB@X+GbW,)pofY=%.?<+z9zZyQ.b5qHrr |O;{K1g#6؁:9Na3ELA]laz_X2ߺAe <uvMꑦ֯Ph/7b$ʓe7ӭ )u&g%M0jBUU~\ԕhDx7tp+ɭY9#ѹ쵒Xf!q?_1dqc-T{u} wkpeZw/b;0@=GLxTbx#QZy(WAm^9hp9X>3 OMeԔ?x3mL]UĔ B Efٟk3hJ6/ eE# j?R,-9<4Ƚ{Hw -i5N2-i/@]вb,d˱8aDx72Q ٳ;{X<59ˮ,[reA f(Q 3SND2(nBY8wtS^t2 e3l65)`jCẻ*7Wwy(pyvqƏ^G*It\?k?a2IUQ֤A ]/z C7V#W "0GCEð4dr,H29h:h |w(xZr-X 6g\'iK[ ]|ɒFsw7z>euq܌0ˉi97?yP&YO4J2" oO ,0{bG͹z(rP͜˱Suk'g3/q'7)r [7d(Lb ;#iݎ;2r3Tޱ}󜚟1@̄(6xkAGI;Qr}))Yr^ :b-o?T}竖~wĿYF"h >E6<-'!e\RA꿏'" PYb@8x}o/oFUu7l'=UGP$R 7l&d9^,K|RIHyɘۚQQ Cp#|b?N3-nW+R%)S=享x(8̺DP\Dn#\OX*O#_._O9<0 V ۗ\zcf?Ⱥ9jZNq@*,ƨ^+~Dbr]Qz뿦PMܶnOţgĮ3݁}E^HB5ґ1И>+eݴi@hrXkd;3b.3dvOZ4_f}5C3ً?4ڤ_[?MsȂAِ"׆ 9܆|<֟k- GhB}BMi=R?θί ݶ0>]vKoNI,r-Fmm֠aA%Nk*T!+q0Qk@1"4Tm[E!{v*yjQu}Z>rtXYhCŷi>qfFQ5k\Kt-L9{\FZR0.u z@ϕxCKly:rK 52bWU|f5S"SiS4I~ DUb^3-lmu:/ Mim݃P򉖵ه!@:]K-|Վuw!nQͅ.Lm;J3pQԏk (D 8I"0*h-?!2 _M]ֵZ/Z/1E0?f=DVưN!uBAxZԍnH'gWlpo=]v4w7wft=?fU|Ӱ|܅d;k!Q JH7R66gP5ؼDXZDK0:zYA|3n.ϗUq jI+O%Bf8XR!|=Q:~r)U#FRJ!";5SFfoXr0K;BfÿT8%P4dSʉV1Y֪գSOz,Gڎ--!߰tPP+&z4VNH7~S~ As8C@<ju{V7͏AG6._1Av f rY#riu]5+g Ϯ&zznoaq5q?%Aǻ UEPsa0G)$yH]ՙO 4G^3V|by%Ȕ /'gբ&Kp.AsCw.D2{ZCZvSsěg[!$gU #UN սOWտh輆>Xa.xjnWDz%zȘ8#MN p?*jxo58c:ۚ'5RjG-L !S,_a:ѳˎZlQ{W)"ژzs==*{Uw"=Gʽ?xe1j {IF5E+ђg LngJِ"{22h`t&>4-Q#](nr!):ijQx~´([e^'F36 G2RI}[3ƪ{jqM6}Y PFQ|28"j,^G"jwØЩ=Q.Ԡ }Kv B! 
gr=[1Iؓ8'<u2=xxQ>%RKVb67#Xm| X6qrZm/Pi2s e{݃MNyu~֍`H0a(al6zK X]/FM5ZW~n6cAH%~{lnXby2R#fm} AD@z#~ֽ_A;V9ޙht$ 2[ט"jJ@~JLwxy@ʣD\gTJp&ibo Yvd29a~|U1N(+BgW>[V;PAL²:QQإ/to?f$-qj#EUWpBFklzxͱa&ǝdQyگ-OEIf,ݞs66Klڴc5VY6ވzrvO֡ul݃"wg({,ճ\GpC V(I +o YdYÞj%jUQ5 YwQP"#fc6} EKڋH7"fXyNPlj0M}J͢@=bkMkK;_.] ^ !1| ŏu9j'mHywq2Er;54ў~ k[OFL:TU9cYHG-Jg)Yb~edDz:9f'gA 0_y%[`!jzv[}:ivmWT2T|GVfؘD( &?x7M\c,bhk#?Zc:lty쳅%ɧ0y&ɂ2 !_LbF+ށfrr~ g>;L_3۷i;imӻhf^V"8S&k_w4Uugj#ddl~ ] 0U*VȟW SZ𧘈:eR9zqbܽ=V u~v̗l8 'u`Lj񬳃+A-~ 8ϗ'6yBoIśVO.5%o qϘ~Bv_ vQ)z.ϯyŸH/I|(6S2DOOus_>nDG\wo.7u)BÂ(Ex|n`ne)|Z;4Q1U3Gw>EPFw[4=_I!':ATVպ7z(ry Z^W[oey_%Um==]WoR~e38SLklS8-pnq2*D#OuİŲdn|ʑ$o؍_mh%nHV4?)V)LS k^_sѤpG!.ËIٷu 4/HgQ C'MNq/`/ٸ?[V 5>P[;S!Ҹ4lwYC2-t@"U“&׉aifήdb}m$M;H:d~јC:]J9:-ϊ;yq::_ uIf},*c~.T QWCK5cJ<03zC? +ӯӭMf}fC>o/5G b$7&&y:PpA!jՆÙԫ4I-E擊U}x ->RPL.CN }Q-DG<ׇ1\֒OJ}LnКu >EMjs(5˟\V:FRFܡyҡ t 0ɀv+WI ӟw2Tжy` TsI%0 LVAqas&DiNUw9_oeI.*JjŊ|S>*9ѯO_? Ĺ\nÍU k$Gv^6sTN?"m?&B(\zi;Y .bZz0(|Hj#?^ ȓAu3xW߫=ʯN8jjUW0M\Upwg/2LMO]{]}ZM'-wf|O@x9->01fQ|,x1ؙ+o^0^{Do|/^'ᖀd^2mNse;)kyL|{[hό'HN$b\.)7Dqo5ma1e k  _ 'S(%;ޢT% R`;Q2m w).`)౪Sx=FK}^ODͥ5 òFGQ^VIw 7?4l;==2m?ﵞ8.u7V]#\&?0anijH=|}t~ c2KG+4Xj;')|,s(~Zm %o~;!pIA|f5/|ݭsEͲ}ēKƊ']EIfiH ֛6-S|<+#XԜl!]~:Bm:3r;,WbN6A/29_4h@M. a!^HK9Ͳqt22]d?k;ʔa+`_*2 J̖O.iLWٯ>(%d>Cl2Vn {t Vy$;}\jqȀ*jS&/ǏB=k]%'{~p TbIj?\}>0ش?0|.+2Efa9T Qr0>y/5DbD]!r2?)k\ȉ#I'(tuu> stream xڍPڶ-;wwwkpi!xpwwwCw-}UUWu1}5jjrU fq 3ؕM $ `cdac@r-F:2tɤL]y7;;'GW ?)Sw@  QK:8z9]@gN`e ntJ@v sB Y: zxxڻ88[3<@u ha=XA.5,]=L7vyp[orG /cſ p{[,Av@"+l񇡩Û)M2jӷnE?¼MYl!`o Qh6v/ֿnX4aȪ 96y!#x98@'ܚ*u|k n@?+!,@3O71/v O>l|s2|?UZJF_G'! af0spyoj \v}T7^zSvxc-@ ظ߾#.HO5ݟdi\@m T*-@n[+j`723sq%Ȁ< Ws(\U.?ޖ7/6{/s۷南L]ޖk;Xw<SggS/Czl+ ֳk! 
X%x2 ?R `UXޢb]M#%Hſ ;/_j/_hm9ۙڛY;[9` /[)t67;vߪsgo]L]eV뿆__-bƠ?=|@O9򂃹`McH]8ΔYTfFNBM]>Md^\7d2Vdf 7sI> کe$Q}V辴KI482]=}يxZ[f#vhED)gR(#%zal|Y" w9)-8B,_VW~c[B^><~'kX?n~7f׭Hp )[GH3 cėӦ߁kWdcЁp]qbŗ]luvCUh-ba0U&"&G,yXDWȒcL3rdʏQVI?gK"Ӭ^>X61DH<~/ 7`/TRZ,3`WQo#C,j`0 BC~^&z2h,>5?I'm>Bfl~EKxAHRyq45dbh銖hs ԡ m;ܳnA/ŊBC9aHҺOl0~^2UbCǸB©h2fh\yY_}!Zxc(\ѵrkQ 8:v(5uJuvodQ!OBDken.⠢q~]'z3mo 'u#4jh^v݉X8|9W*NE|d4It D >}5Ċ伔zZuQs1o88 +ˣ."V=pA]b*}V87PVuX7{~]ur+7@T^MHnkqFMwBN>K_.bOXSÉpaCpR(Vh 2f}MY/xZWb]Y2M8>CuFvh]xnDI/U:yh, vDD 6/ɨ6*ؿyWspzԯ6(4Kb6W~{ff>%^xAkȢi֟LzV,n֝#EWdAJ8XvDֵF4v|8\[ŀ.ۈYO09\ib!xE3U{Mj"I.O]56L(F_>R}:M\V_HRz `hus( |u9'e4,Pn ʹ͸B9:. 0%qp@j5{39 >~;mA.rU>y|g\@|-7XO<&8R%ubLVQey7 /F=bs* |>/:yY/= -MD_Tp>ȓwS!.5\>;۩/I.1q%e|AgF~jҤOiorfѢ* ,`5lB7H_V˅ 5o,J])uTօOu_,fB݌H2ERaǑtLpO Qm -ݘkkpD+ج s}Ef-g&)t0yKu'I*>4g;wmx<Zb~JOlBG$*m/< JJnΗCm3WFn4P^x."8*,PfJ  7GxY$_Ր=:m6-g]1/ K g) ='z~zl9,(;tLNW5=wyXe+PŖ5#1wUܨ nJr 5]v 3P<:|L7n{X"h>'S^֨h׆DpxHd\PJJ\sv~.nd ja&WUITŖױYfc6XB8]ݏZO+;MaLȟ07mNR(ohy.Pe{ăx Aᘜ@]pV/BKsC;H'ݏM9~&9_`.v٫V>xϩۈ{ܹ՚Q*;ΠSa@L IP[x(Ph ćDfj>u0>HF!0O7 {O/)N*([B@R9>*:bO?Ԝ[$@#KNhJqks T?hvr}9|]Ή u%v-,wo'[ IےnInJ̽i٠@$0\"$ ܱL;PgWr@z@jz۞bغoݺ"2-k-5lTpa e>KoI;F;w w[i©X4`ȳ  ]c΄EWsl헗n- >С2:z j1a'D#6ondC~0!``mWdL)LWZ\F;C\g"(I143mS3Wb/\7.TVYqSrޢ6)@RnVby*kIڸa~X3=ϜB@ ^16UpWb'3K}wwL{艤AW!d)5"jt{U 2{hFTOcʨPLgho į#j4(##xp(^)EC3w2^W{ъgV+9K0 7=UW ƽha40F(+ŭ,G$jw޼P)FnvqPu"HzYӍ ^ wcv-wVuO>SG&ӟ>gsq7r0N"+6=[leL/+F5BAjqμes@.l 9~(++Eb^|W#t{պ9 wN,m\k\t Beq/FcI P2пR*F} u(*wD=-ώB́>"{Y63v㷚@h. >iO-axF'[ Ԫ_K n !Œh@ ½/ #jy|NZT'.G`jcN)NZ; uˊp1m%]l..z&p6Z Zu/((;"MT +EH$⶯c\'ʓx*NwA|De4r+b%Ht\\NmNznc;lƬ eG+nI'łb̀ݼ 3:W? - ;jj*"&=qD˛PsoĦQu#{v nJ8cb!L0qhQ!Ya%In? 
0ҐЙ|BR BhF!<&a6N֌}6M2R^]ܪ;֘1&Gz^73Qf\XbF YdF۫ X~?{Ўjf .2^~nBO3S (sRbм{s$ u$NB7*b96k$T]Qפb$H➋*Ժ?}^ ;gm~fgQrQH@T%-b}(P0bU֔I_J  OYcSPdd6~\ԣxq53i}1t^&eH5Q#]2ŋf.w,ԇHtwdl΅u,=%u Φn2i}e\Dtkz)d;d4}DOnM?ΛK[Yr[k6el2!_c?'5E%]Sc(y0`oQTx5O(z}J[w|VsqHߗ)ʧHp( $lu9gkVGn҄&\[Z'W#4*׭ϧmXDZP9TBL l5-(˛ O(Ϗ[`BNpZPzNӏN^* <~mI (hФ% (˂>i޾ ~'f?ԯD`gѩ ;/UU:V{60f^#H##q >@cWA!o43L"OzRgK -sbK0YNsߖS{}nm9LoZQ%,,srJz?U<1QsRS)O5jggQib{/Z/t|oH0Te>8= vᔅGPFPSs˫3CVwpASyeE#uZ yabB()g"Fu)61:=Zw< <S G~(_<>lֿsI&@rp=G @7rͿҔ+fJH2K[BѸ:/v,W_z ,d;ʐv"1Kt%: əG`AMzw=*⡢t|q/6&b͔}ܴ GNڶQ|xw~+~ytx5-wiς>7Ub6: GW-Өlt S~"e#y?-<(0 #\JF Knm?* r}9]:zK-~R>㇇CR!x0 -JQHWlX rՄ1ABp|zVshbD ak Hi Nݾ(/8[L3'Rie8VQfЛ=:6KȼiB)[j*)gv\]P a쭋ീ^+J~&<|Zs9 3+ W u/]4{5.\Slp/ˈa XTmIs "dt$٩X%|k +2uJѭBhSae"ѕ{]b,Ƃm \n ɲQ) '9,-nUg+hxkpr[׀Pʙ]#]f4~Y]fZ֪̻Mdd)?tPU|N ǺQw:93br3MvM=V)E} @f-)hwyr>Ps̷,Nh5KǤ ρcliw7Ŋ:0uu#iHg# F3Jl%/5laHWk{|-!_,0N7G+/F,ؖs;Q֡k*IV~,^/Tܹ[! [KoݓIb@+11HqܧEv9Kb)Dcהhڇ>wsO Cf5SΖ xKx.cG]cJ#~&z(`z{@%%D ʔ{=c8\W8Y)A9A[͙7yT=Jmh>AtRcY ݆۰1/{ KvjlQ{҃Sa0M1Kܼ;~d0t =&e2+$P w)$Z F:|f5M26pPIŅo`-@AE7!6fgR#nznx`jvfaM&T>i|7|3ӶE046hHj&Ss/tzWv5\ UƲuYX}%UQXcbW?PdDfre[p!f;ȹ';r~dsݐVr;ܹgN{Xmrod>yg]/4#Tgn!_47DT#duDs]֢^zʯ «e,=p|z.vi$6*с$g>$j Cq.6=Uc%Tg]Qy}A6V4{ ,>yxԴv`x -*Rw0f° f& ߄^ZW0~Dn!AD6Q?ѶT .j{vqwuk2?;-z5X-?nh(5J+gnU.\;EW}첅34<_[J5ե\i-63f`\V'+x\d)Ւ  |q&OOR]Y0[BTtza.RY(FcS_w' &Sd NN-zn#rёGGsЦ$8ʦLyl4|}ں2E=]raߺ*}cV3P]U [ՉH&o~U?cRR({9u]n9ag_lJy]; #$g-uՊfxE [3_>0Lj=/qmQx0ҕa؜ R&T9LOOv MSS#O؄$Do>zL}I- V&xKJTd]s7 \}3A$#3Ro~Y~ޕǾ QX`[E%9/_R]MةJA|$gqH15yxV__5ʶ?`} 𖡽!eJX AJ+u'lʷ :*[wm?'%!Z7̯Ghm@X86{jGa1FuALiZ-jϦCAh~L,$O)>d9%15GUy\th}㖿*J:gtYG>Ush3΀#N1zeT"E\rǎ 0ȮK'o5| sOp+!cDY|$ᢉ#6;aI '4imDtjnx?xSTm0=b,7ЋV4H+X\^bni 4sc @9q1K#54Kc PdUr?҉#AT)&۟AS/8S9J̔ &~̼/idҗe$ʡ%aj_:bN̜msP/N N(o %P

҃\0'Lia{k z|鏦?ZSR^*>976Zm])JhRrh&eلeGsŋ0%|>IT)nk#!IxI.8ammeO41Be9-= :g\مڌ1(Lr=v-''Jn-yV'+o1Q[卋%꒟5-a>)p?1?J>yA^E\#=\"_CZ_d;Hf(QhR6@؆.OzwE4lo^ /_tx6Da#u# J~Z> 9҇Iz3ȡEoxP]ü(?Mk63CoqI| m -,c<"$v4G3;A/@؉%[4UÁvq)kW|&W#'7$x><_3^IrP{$c#ΣSj9i- ڗh̸칊 'V%,S0\8b3 {̶^uaX|o<՛r'D#ćuJy07(Z"e*qiR0w->-$JN|ViJ|v,r RgHU΢;^m,K{fh[ٵn%{Uƫ.<.\c lIqr(x.# r'851Pabi.S(=o&UFS#/˓{сq+ͥg̹tq #5>th4+ן8f{$$ x 3/qxr?Ved7ݬȘfl~g.م8SVyt58ro^0kHD^QpB]9T3gaendstream endobj 646 0 obj << /Filter /FlateDecode /Length1 2386 /Length2 14991 /Length3 0 /Length 16405 >> stream xڍweT.Hw Hww ݍ4 "% ҥ߫;Y35fv6_, x۫$qpL]\LC e/_ `e9U .H[`M7AV/UK*F.v@tC 6fhn?>~S+V `|gwQ'bG_6~]RX[~j'lpy$4 8Rۿ vAp%DN0W`[ C;>#V>tK6؇r} xRt \D'{?IpK09`_ueGt_l3u7uKOf4<Rp +k{ loWu;]sיzx^@sEGs0ۦQbO)AͻWwr.x{]OJv\E|y*uЊАIĒvb,XXP2D,oP.g.2 ĽGd)ti'#qI:+U"*.CY>@Un~O6$2YmNNdaqH ;p>BɭŒXWou>ȏL1t߯JS\xUc$&9ne <Ql-=OJe~$VП`}:aBS/H:R|I;fH&\>y_'ȸQi!mm sVw3 2 ~ ə }"N>_{W.rɂWC:iYvJٖ@2iNZ—nj QÙ%d?/k)0pmZw5OQd ; ^D'{`3kEXњ/Y?GafLMW-`g/{:oP%BJCM |F>h2i! ef(h#Yk@CS@"`߈ɉz"kN,(qoj,c5̉y|AQvhm[պUA{5\pbFXo&}3 ︸?UoidsEީ$*[)d5#` 3CeVuq-AI_? ٨-ƬyX"X*mg㲻,/l&𗴭j52雔EUG=q! Ad>,Ųdaf+Z{9\{vB^kSOXk-PKI^@ڃWPރf=Kq8a԰(G ([5B6Ww9. DO\#_e(dHCļ!!lK/uOgJs' >4\uȈi|B-k\*rQHR17:|aD%s`fף[VU*'mӋE;娎vT1%|CL~Y9O*F 3s=N4 fM_F=9Qp`51@A(WM޳N[^ܔef+BG  W0} LCXL;0QoSǜù ACWx:p~ؑ3\7e-YP63]] |fՀeiqGu~f%ʢV)ZUG+$c&cc@m Gf[h +C.:>>EK3Y }y ”i2BufLK.HS{|. p.g|b`pj^>nC/'+N\nʍ9Zy9gT_t8d M1STaM pA]om<4"fB.&0ÜwڵwƘ"c5Y =dD@<%iomdlLgV?S*f8iol+ S^uťrZ%۬R6D$z#<(fa]=ZX뒪rf!H1/'/6g2T姨WsBb\isSKf;E SEfRxZlpSJkJ4wk8 U3I&YN?\&]c\'cqKо뚤u.S'0υSѾyæ9e羟& LmU&!TڂE-y8{Wv9/sPYP2:ug6 o^ȗBZݵ=BS9U _'%0'&⴬^/Q `Fz1o9?Fjk.iP+0`뗑t\:m =ZSˀy" nQ6B vTbYO_2£AjG+Ӽ*;mz\:%yeZ qd;{4NJZ[?]+Ed^q"a 9a f>&|jK-Y.eFcXL~ifCO*~߶:!AvĚ\n/ImwzpY>o|[ r?H;AVBe_:8ޱ6x krSla}9S =y%P_k1~W lsʦRqPIE%*Q뗷/-/S╇TgGR%Df5m>3x-.2[* Ko֛҄1wg{V]S6h}JC`0\;_G0zS9~#5]ueX4,:bev s0K4Q #@67Y:/(•L2QqK9Zk|1?"άwvHx! 
r-j]RML'/b$lnRJLM >պ`k=·,f9e lM*)3, ķ&Qcu^x3!>o= e%ɅiRp  PqrΟ t3}6ܪVS|zd`XQ^#rfʗ!Ry &HW}<y64g3o]g'j}A}=wcg'2!g9DuffؽWy :(GT{L]mJ iU-/!%NiCtNtԐ`+~آ߉`FoQʖ̛]bP f2 Uz-eyscX*v^H>1nȵ:F -3[(0NZ Տ *\3 _ʑzɴ4nBj/b=5qC\%kG,Z( slE#/"D.nMҾ9v:Δ:`.QtTnf^H/>@5K%/;2R7\SeL{t¶ah-EROHxׯ۷ӂĿ(6|:Z94A8S|E* !Qړ!,2L: gQ2K2ڊQqWMH:r4x-v$X)2 UjqCIpL5͡vxX=0ٟv j2bNaAZFZqi.N9G Ӵp41̗ϞIP2c@nZa|0(s<6y8uqP8 tJxk#yp5A87I?yD-aasm|y7Т5 -|^^9X֑Hdv6'H֞Tlq;ۗ%|($ȵe78#ӤZ p:):}+|R?yiPCƢ=FwL5'oաBy$Au Lx[<ӞRqE[tRWRzn2A6ެh푰BKYe>H*!;=["?>ʱFMp6q[8SOfcЅQR9US& }/D\Gf~j!YVz\8l9z/FRtS~R=qNVf!{BQ>84M Vfto96-!cbnRE_m*;l-n=}daE^gVP 9`m%4rar5:;j\W6M-*o.dyM˘oa\Z=jWj^Lev)V(o|l)Qͅ.(WS="wO D4|{ Eo`+ȭb&p Je)|CMбXSrej_U^8I-M Psl1!JUڼS:&kOck@~tHlQ:կ PnG?IE⃡huoK2 s3BZb(z]:18zȝ+ET,j>򸃂j"*eJwc~&0 M`0+F3-~yND7ՁmEXU|9rjMJZE؁^jw4>jr8V'QE..,c`\~nP7-_h 7 jLĉ'z4-ZC|Xq |M?7iMrt;4_,!X'gY>=~ҧ̓* ۱V3 Nly`ȇ)@Bp"jEbsPIf};Kۨ/X4\Ck0ZX#7^R߿ڊ#\I2ǜ`mug[<BUe8b'=Po$acr7+l`K|JS LNKRpJ[Kܾ܈ ʂ@2w~،BqAf=GH/͹chJV$fzױXBVbX_kk3;$U'ɟoFzV0$W`ph51%|C~^D5\77 R}y}sžjg\:Y {%Jx@p !bk9rXWd' ~<"X-7eXay)AkOfg x Z`sKv…c@jڒ @C<3Gu@ݎ{?zAhMVdۊ?2*aĪ jUc>"hM @&0Ci/JIŝ-gPiH+'^ $^l(L5Mzyۋ<=m5z+ ^b#b[b_sve&*on|᰻,$ݶdܹin,9r}=uA;[y"Oc) $t*!l^T)yTN~I~a?p -E()A&Z"чUxEQmւcɧd55G{X_Z)Lf2g+qEoޒq|B-;BvY<5z_:۬Ý9 tVy6&DD#=5X xqj!9Сiۯޑ ]*+/O(Xq_hSM:rQ^NؽwנL{߱I( H\>{If .,|4?HSGe;9.MPn?ggwW"^F|ln$+w!D89i_S6gZ^[W:03p>ubk?X!"ܲ֘v]jVLѾljp({z|y :ɶB,meGMYQˑ .kUZT Ӝz USVEaJ++ &nQb :m|ӻBl̰x-*z R6Ik^^Ь[/_ 3]E"%dT"HB?,4߭uF<_QimC1Y`dn~0]׋բ[<;* q؟ʶj>:̎e[TniHC=${&l'f&a.ؤ tp'!u :LЛ5ݏ,3DbiHY\fA{dLo1r4:aPxQ=}87>nLj}n^X"݃&Qj~y WÎ>~I\~R+}^) ;-m_#_{Q|a{ѫe^.MQ@}DTR/ 욖 zk `q1>󏂴l$uT(YW >"o&, =VYe2!zY)'U(Uscpe1-R Πhfh&Z }8 Io35*e_W\nvP@v3]F,l͚N=:˝:#3'жSfw;QMgI@j"l,(w< -;HR*<;,rY|}Nt3()O$PǠ62L2Ϻʌ~ii~[}*bz| rSG!xJŋ.2Do* <{ǘ2ʸ]{! 
ndI4Έq85"\頹ؒ"f88D7[ T +%[.a U# ,=)xn7"ŒB7[ͣ}=M$=3,oYB#f1 Zߨ{tޮs4sq՝> YLMf?`5a#ll!u0) P0YQUQ$xv<ƫ/c?g3C7L±F.KW`T,F|7N1 ~wޘN2s1%3[Mp| s5q~6Oybh/|CH+hxo(=))Ḟ֡Q#wKyx:"UD0Un+mQsPRdO湦N"RJ0n[~ |%HcޝVy!#\R#QwaRaxQWo&346[,6\NUla5B"+p_Ir咽e>c3Jj 4ռLD ^TsԷQ%Q aʛ1@häUau |n0nYEJċ]X1h_ - i|/m opCbrn?nHx Mv, `]4v8#

G)}D-,~\ od11l$țRyqwŽ)LY͚͉rG+ em"V|=4=uޑ<ޗ"o|( 9PEw@t@oPP=v 2]ңgUex֦yM0Gm.z7འ8%Ԑ]"\?,_R~C4*1ݞ{Ox4MKGTF8gyڴ{?67nx+=7SpR?qzJݠa)JøP$D-Yz? YfUP±L>m&wBk>w- ai+[ג =PDɶQs`ia қt=QGTp5<qfc's cjR[.Q4րðgUZ #dza+0Ѹj#+RgrE'oKyJq+=l 'z Q6{)^$LfΌ=LD)^rHRh`^~9 Kќ߃N+.TVU^˭1)3Vݳ*-j`T:VGcPN=Cԇ9֑+.9+"Ή-[$0,ofs;P")]ć`>%qjmWWgtt0I]R=ߜZx=3/|`ʊiPlh we4HWusH_jW3a}0w[?Khh&5, [g>FI` `K!X0aguUGo?hPXJsc?ApMw}C&8oF 8B2l ;MWlVJiHA߈`v@[ˏUW0e'Hq~(!1k.x%"Kh$0BW&M+OOEЯl}Q&n/3s"$M5u t^Z:<,b qᖑQ` y:c (#Tt#+=G\yn?VIbU9s.PPT^b򻰫9Ϸi wl /Sd nFrH7̦{׼Aw[QSXlej׿&Q;N0 %m~ ?p>kE[ߨFV` jDe,)EU{ͩxЉz"ډUF^|2,0u85K2  i (m'f;ي"3+[F l? 8U, j;8LHPr`*#.c& K(򙍇Bmʙ m ъ֑Ƈi A_:a"ܣV? fG3_rCbڙAo&u0?Lrn0&oDpD+~2$A,gv]4᠍SBo FY}å:!5-e@/xy)aa­WVV Pٟ~.@7-4ME@m%.|pgKwhOHD; dOK89xBe!""lFD&P4xRޝZpX'15Qմca%;&Bs,FH\>G fh)ԑS3|q#yڮ]E*tB05%WsGgi#W܌:#p>6{޼Y)^s0X0Oh]5]:uUq,`|!7|na[tyMDAM`9W$?8E1nx4>/FIAmD^" &h]h-_Q- +/%'#+0H@#NM!p[ɡ9s|w`ϋa Â3M)2p8F)umgv¦LSE/"85IߞEr,zFNTZ)/i]:s:(־ʅIh2l>^bXi tƃk iv"Fvp+ҍX%0 Z]ѽu;Ni^rTj1Rh$ɣneam:ATtU5q̴N.2"f-%mu-n*J[/["t/ej/K=?)CbQ3/ Hk'ڗxkP $Pt瓐DW*;!Na#}x3b6B^3~.el8k(Ry =m  dJ)nf_p]Y._/*97URA\%$?a(y?)Vs/ $|+zlJ)< +?NY Q0R*5SݺTĒʫI m5<-#SƝBkS Jega3e4]%uh-~sUZ Rp"va\2z1 nk.Fskll&΍PV9?%wcKJꁱRñm쁌HT0: -6wA+ q4d.^AmE6(f">[j^Sjtb!6X5^fLd-KhgDBZ3dqIrud>9dfJsu & &-YX},:R RXws=(sGgnQɜxi kH GӋc#(dSZ~xk69]x;;@Hf U PHGFJm|hpbT9ItOx y ni{H2QcJ7!ʷ'BEqޮq N]iL g5=[Om[ռ5գv-c)}=3rUT749FBRdtE”u-PAiTIVvOcz4O{O*K-y޵**d_tqWQ^Xo 3^<ږfk=YI "L~4$oZ- ܴo޸cI|Kl?͋Q b#[aE3"I:+׀ V';=u/75se[6zIA2.wgcӨ[ua_yhm3PD*i10=3=kaM9fKW iw2J?  
쩡{5(̈YޝpA`Dm?8/Tga_V $bR-S.Q·OEC$W#nܐa]+]31Sd 'E|ĥ"ed5j+J>{1;ҼEBJ+(l`ӏWw~6n/F:Q_>XqX.9Ҋ#ڂ5 1DĖ_@ؤq=5ٕG~x=+.s("mnE2+W,[Vm!A6^$ *ΧRA;+nFj_fĒܨfu邞YƶѤ]+~#օكϿmY/v'.},o&YyJ7|1|a*{9'fI6KBᝦ N30jYbopU4C[X0q&w5y' !^HlNbVK؃Ü<wrL'O(3Q-L3mOR;4r[{D &<\7_P:xnןD_j3S;jMt C԰endstream endobj 647 0 obj << /Filter /FlateDecode /Length1 1696 /Length2 7923 /Length3 0 /Length 9031 >> stream xڍT[6HKw3 )- 00C !%H7 HHt(H -^}k}ߚf<ϳo0kqX,0(W g bbҷCXL`W;Tr.`  Pu|Oxy a.by@ ]`N^.v6p>|œ2`; mA@@ýUw:r\lq<]+l2@7@/Bf bC]&nP+ ;@OE%VK s8>nP/; h)s= q!@;!:("2+ j#/7cVZP+֯\ Ĺ{){em "Gـ!^'O`gdk}/'o7 F#>|\` ĿWX||+;` b;[F`ʋh?>?fB#]bm9yu?)C<>\.'>>'aa!߿h_*Pk@pwzπK\0?n+ B?o.gK၎v? DSCFFWle  y\ c݀͏C'?_$H !n1Dnf~oWY@n.. j{AX 0xЖj*啶8!8[[u IgVC%aT/G=:RsH kG\?O:mV0J15x(,F7/*7o\)T=W&+A3ׄ.in*cȯ^_ȶDSZ3}>맬T<'aDުC $Y\eJ( Ø^/Vo)H&4+ݵtL|=S'ycB-gu*,S`a$V۶x1+*0QG"`9N~"&#X|d,-na8WFt4 IDžMSȧIki:{Rud][JZlc~Ks@ab+F( ,z:[" =D'Paw92Em>W5%D|!#HZ0NaxH[hN9$KP%]wD'';>_N%|Wevg:]YNVWNUJhEk,!"n$ʤg _ _a 6q1~Gۿ0:=el19z؜@94'nuՂ-^'sR}av4U!WiFv`I '6%<f$^+½yphfgE˓Ng)}^I)|YbG_3-}_ /cGQcl~ F3IEh"tq9=I &hTӐ_>Qg-hk~bPz]='ۼ/W,#:wuprRq9D8X͚ʈ{%KP;&›KmÂzPߦH}\x?Wx(]5cD~ZoYOjlCjFosM<]G5h^jG׽Kb ې<>^* c1Čw3ՕuJٵFzl<ψ 7tL]p.??hx qC?Bn)ߛZa?HsQy 28P;'mD䫞}[%"~@E6vr"1nҺčfWzj#:\)CELowf)Ti.렃 رHl,v> "`٨7]{5ꆑo.q!Qs&}nqKW|\w8}M;,mzr&u~RZNCYp׀< *(7YR% s=`wy|ʼ.oO;nǑiB+L'89U),hMYB &ZWQm=ocYRF!M`&؋s| d,`]ƾdօhdX3&أ$Ee\ս y[#1W7_/_f"t]5V']]thWf2@hUd]ԟHT!4^땹ljs}kSIOUdKWtZӓ=cj-F@if^XxjldVqL1'dAQ)=äixT ,M7`nAqk/wa!2\'AFd&ODط_ 8{ \V.O"׺pb=V.[H($ tZXI'f$1\ٲg{;0:80 Us?ʡH\ 9OuTMLD\)&q:yPZ?ѹMqB݌ AB;tHg>QL{6Z񚩝i^ֻ,줷Ɓp hc ~G 1ӦռKǬkU/>Rѷf2.{6Zoɖqϸ:TX>4O|Ryx^[\Ԯ \2wwz]O2} [\JꀋS+E5m"%~{+ <@bU,ى .9]A[ kUfA# fP>w憪 Mgn>-c՗ٯ8TJQ<3f6Q2i&-cM>ǰPq@$d5Qኋ<.bdkSfCY튉2%_bG~F~] \N) džRN^MKLdsmk5LQTFaz/MZcnaJL$E=x!x*+ͪܮ|e|᧋98`U:xsSx[w쾲~h䓥1$ 5SqlS̈B.%~8j,]fd1Q,T~~W! 
Qwn:]5O$ׂ,[{d\jr!, 7'N/sՃBrpM@kԇk/hZLKiѕCA^I+qK4Yx$]E4EaVsOSQH'[r(ia[EG;6 x?k+JwW&ah&)wt0'ӝtt7ŠR0{m< ND٨7ѣ _Zj$ lg}$PòJ}s-4݋i,4Ǝ~\$:<=lͣ -1ow>=嗘I@߇SwVeDPޕhI3iī/ Bf(x,&5 IyG9T6DZ|_4 sK }6ObLG 6!$e ;,%>f @WN0C,=ur<(PΖPT#X[G9-z8K$GΥ [=.gGUY>T_ JxS E2^j -kzt?TW[bؘi|'[c* y7h tPr= Zc FSBOr2[<o˾ M3V\Nȩ7zy`~k+yظʌt$̷A ^/5%*},a09A)Հ>f -+9#fO9{}+w[Za*o,O<מoWB d0.4L2 ߋSCk'#^ٿ>YEjCqjw+^c 3(`ET+]/}$QE?o2XBx?'HKRB|*]>}e궾k5ɐp}З5o] Yw;$rt.ZMZW]ux0"tz1N=dmW[nn?fg2̕B2-νśjTfP;v#\4?HIw; $A#z=L4Alb8?4aR XmT?GPg>i>>Z ?HVZ#و,X>}my$[ ԑǴ+Z+W,QS1p%ϭzH2 A!d=B T`QLfa+mze7{ՔM%x[С7.%I$ɱ1g9c4> AIGh(~3B΢Ã54mĽ%cc"E%TИ5( ۀ]wO`+o?b0 _i-T0MD%=TmtE)ރο&Z패.FmUZY9?y=]_I_ÓbePyEO-f20`8d-Z'RMs`n %5WZ U`?v'HߴJpkf,A` Sg=y \ɎqVpκ4n"XHBKRi`0:]KR ƑYIeQ==Z$*͝FI%ܮŬ-f-5 [!g4BG]ܶ\MDE-Ser]ǞP٭uMLt ɍGLNVuvRSLPI]s_P{.}`h /..)5i?2c!JeZW6}gyڏ}$ZxkX{$^Pwl;yZ.o֨jl߰ GQC6qMrG5yx:kSTbk\C7v{rt&/KF|O4ݙ&n7i H2 ޕ"xR}]r^@!gGF ,kv6& ߎ&R[*U8%_yrBʲ"drTƏH> ތk׫{\bfwVܵ8oc{TJ9N~!/S,))N5Sz;~-z CgeFh:ƙ6g98&70:ai7~UJ$lɛ,KCd[A³a;Hn-ߛTEoz>98骩(׼NP郘߯+6hwnL|ę \QаWj2I$1%ڙ>e<7%][+r)4.!D>7/jK o aRZ:g/9~ʉ5$%jNdHR0p0΢frLJ m$5}4rVutma,z:;x/1|žGꊭ)q#Ír2vzc>#"yH]Ц-wYc<_+Hԫ)‹>m=+R jTޑ޲/yGqVZmE i//ɧ@w];>2oQeE͡j/(GsR2AVcɦ9.#4miX LׯU:3@)E~cVbu({* 4HY%Bo#.Eٞ^W:S1$;#Fgqjw@rZ'ZjzI^UnaϐFE>$߮1'upy$X8Fy~`M2*m JAjӨVR1C;jXmy7E C`saϠWKh;8׭̵ӈ@Cΰn÷ۊp#F҅[]6_-HpZ60qŕ586ϥCi1  r۳U{y> stream xڍtTk6(ࠀ0 -H C0t7Rҥ H "HJ]"(z{Zfg}];5 xA|9 =cQ? ?;>  &b7"]a"!`<  @" Qq~~?ÿ8@hTp+ C >}; E `8@:wz W N ;Y;!R\0@ ECh&#b\z G wEgHzs:@ CVCxl > ; p/`sP0@0Gs0@QFFx$ s5"W)+NNP8ʕW0$>v/u#<>-l`pk_CX9a.nP(h?@]POWy}/g  A@D>`w(tWD A08?0}H'=?oOF_}?'&+A!*&wm06U t>;vK ZjDn/A?@RS*/wCnÜGs m BVj ss 6 -f^ZkP??e5Gp+pEwϿUCֿ|& ,#`/"~> ! 
(t =$u@#~~c"#"Qh N0MH~!j DKV׿P'B45< m^!6,޸+R1N?CiEҺ?U _FCfmYA)ʹ( CW.Nl,ag+ :̷-MqL'N mxap(Fƚˆi6fe[BQ MU _aEec/VLzR_uSSy'S'dֹbp {)c۩jI(bEb&"n44 pJ{.9x8![^E} Gϔj&݇wlQz=t{rF.ʷ4.7$aCO1֛Q";Y7NAA'wh}2Chg3hB@i0>†钆ťqdմs΂Ugl܃~sĬjPK EYkscEFY ~#FGHQ=YfJ UzT|5iq(怱#F$nM҅6{7y,7RKVt*Q[r]r!{ŝ!d!?Y^«i~m`,,}+z D2#ἵU{Zxs3CӧlBʍy`ўyahxLe$ܼX,d)jExpw̆D@+5736},2Z 28c$m"/gC=4g+{rvR Ga|w.!8lo2'e/Ί|c)R=Hn1,M{~ 8}0>Rc$d4UE\Z^ĬlZ?f'Rɒy>HO@k"i\/o |Y)6/p/W>~E3ެݙ!le0z(p_b%y݉k?NlPσFUpfwti#SbNpuXxAjʚ˃'e&s_t crSʗêVRP_Zs8H|"=0qXv/K-O.DŽ%r1U#X|e[v/E W܋8 {9gh ) '( lqBx18?&2TF>$6)[P$iP>z0ZA7{kaڻḎ7/61h(\sq!\ `d=lɴDP{sB%zkwK)=4 Nx"'o`ˏ0܌Hl}h~ҳ@Ѝ.Vn|[Q0.!)Ϭ;FO} Zz$oo;$|f(y|:A.ThlpGx]0#27ȚnuDta?sM |dt|襈,V*NWj&Õ?ۙT)t7Z ~mBԽ,SRR(9+q&EN3f0o>cHe3YA Ʃp|li<^51K8"lxLޯ/*IiDl *z9a"ML79 wEΐA*Qڷ',&f։gIk`|[Th)xdK4|7. ݅gyX5Ÿ#Dv,v mK#zʠBM`9d&@k}TܩIRD4B0.6RzTELn˴9um&PݘMOl#)b[Hh9KSk A l"ibQb.}Nz %,έG-?눙v]FF 8?w@)QxGR hl%ѿq/AA+4<8b>F{7$8Z}a/g(Ie4u,|*g׾WX pnpᕇ3=Z \>V K`K#+*-Wbp=6a:~2"H-Z^ j3 La3kMK3WB_}?]pQ";X2,:W9zRʉ;7AwӡW2ʼ;mo~_$ 0GRu@QW)KcG] sy攖GA)=^3۠÷IcB#G;$}vsj>c'(4MLpǎO|u⥑dRuWW[q[˳= wФWWl?,Z=2Y,ͺNݯ@B%.F/Opߜ\sKNw֪tOfɄq@t.e} K.XqoLA)(*F8alP^Ϋgƽ0˭)obr[ |[xѩqުr.TCQäd)/Zҙ b8|rI>Oogוg1Z lq2FBƙor10|sA7MCr0,* Q)el:kx~ œ?)zǫ-`ӧ.(uGT &w|dp,xKIxHhTlsOۅ,\4at?ڤz%ö*oyIw> Rц-Mţ߬ b R/!>p*E*__S[1JU.}⢢ێ߲n jYsyʉ-\b8;l3F#j HrYd J.'ܺL 2f Dʮv&Hx4lE$SH%_ Վ7|UWkۯ7Z2˝zXPў~V8 B*Y2o?q)esAI <Ʋ}DYNp_qgj,%y}lRA##>뮎w)@碀!Q/MNfNfzL)ܰNɭ:YE]WTeNX᭦Eb)zJ/ʀu$$JAjZEAlZI:\ҽ "]%᯾k|1zׄ_ w6CsfxKמ:Vu|:.{u#AMu-1ëT4؛q<y&79BnmAK>qQ4d2D[f(vm+{_$[8dV!/ aJzc2삔{VƂBqq!̽WfRqR],έ#} 8ߑ 2% pϫXʆ/將,nrX);oRk;W ,.nei>5kԻ0'SvηmՄ;SNGM"MWino = 5]הcN _8?$Q+ȗUiU&f&;՚nKSH=\sBt]JG0*+El[}Sa|'|C!u/r0ABܭzcwHR&%۴3f.9ďϵy|"KIFzPu`zJ<2}/voMZ-5S.xZ8=fz!X6'x=0Q N'vVL&N'+s  Ed5#{ijidz@]BqX !AC|(!ٸݑi8& ke2A=7[k_\ mqݤX8I$P  PVaQjix\Ҁ2]s RSvVK.~\ȰT gu乚h&! 
?gT08 6QTݥ!k5'JyYX/up!dU esQ7mI*Fu^l2Dt͍ )_}; iU/1}7_,٤2WX5ISnLn2EU]H?C6S-}2'~E҃TF,/jNjoxnO0ߑ0{ѩ} A0.Dxufx <RZMGK'A-Tcm1S/zOD3KD" ˳e#-m;w 9k_ n",kuvի(yn p]endstream endobj 649 0 obj << /Filter /FlateDecode /Length1 2210 /Length2 18676 /Length3 0 /Length 19997 >> stream xڌP[ .Nܝ z9sf&g~Z{6Ċ*L쌀vN"rRLFFzFFf8rrU 'kp@G ;[X8 >dNrvigk C;n @ mg t#ww03wG15_!-@hbgltrTNN 6vftW 's24U2@+TL\ 1P(m6ۀPwrsښehhhobhamha/q!%Gɑ f1[;#_D-}wgZڹzZؚU=Wgm>Dpdf@'###'3't36g+=_J5x{L?z[?< ]'g?&N#-ܟb,_?Ə Ofbgk_G̠ )Asx~fa|ffc0118>7ſyW7ݏ>˿g B Xv PtF6F?L_.(;[[KOrv`M5j ?Ac?33-pp(Z8=57k [_7̇#},-1R?vۙllCCw@lO4k vN.vp,;A/߈ qD N`/`0AL?  VE"}p>A\8?(Aٕ*Gv?#O㿈AٍEl:c;/Ιdnbt:~\>G?L>3/h?" |sw{s?,>d<?Jq1#`'هGaqEILlQ 4O\`?<4vvpxK@711Oe]PCIfV=1CkȟB=[%VVz^iܘ Ft^Ө|O2,&f-î7sXt_/2%x)Z+ϡ)~#MFGBcsr u etV wI9^ NN$?~Lcf\)N;9A8vǎ+c0 #~ 2rW ɔڛڋR6kHي#؂v3-iXNV-`a,+/@ +>+>EKvd&AO19s.n,IQsiv,2!ƂN`W9$+rZ=`ʩV$<91r3)izٸE.F3C%C0j)HE~H}EZCY{Cp%yfMNWۂ8ɥZsRp^y(3scA`a/,S3/4R߃ jM#˽rF V${Տo DHtו׻ܒ%Xԓ])D}'V_V8Y?a*`a/s.-4Fsɯ!OQK:!WmJoz57\ێ(A8|Zk?wD(u), `(#_Out_:*6 .Fc"۲mn!!,\ǡ |6reQV3w(=Yʪg)'LO]tC:!Ŧdۋ_?.#OJn$ƼQ+>+[1_.Wlx.4sJ2@@\Mj,X#Hhɨ蓔b ~J|)p/H4āo<5#!_E\T0*'fhzք\7?ynW9|3vJ*Y sbv$_ Té\7Q넅4 R qFOᶴ"|KQB~9~p"d1Fʜ4'/ Zuj*" BV,sMY%o[۠*%?MZ|Awѭ4;~z1{qΤtZ}[4_8*u2l܀ O{!V0I+i!(%PYGꎻN.K@쓧;I\ LE.bj|&vv6Oͷ (1׺F]0+B6O"!aqOmGVXCI y1[y,J>l$<,+vOE9~3V< _{*@"${GY}0qCcI#!z^/sO((^}r, q vb%dÇxj\W^{c_quLMY|mԖ,ۢ;$eA-RJ7L݊”ubs94 K;w"<XgQc=ITnO\ r˺ۡ4M??Oz 1]a( �qT0﷗0Ι #|1A)oLbuVi^b24fC-;֩ 0GJr$Nnup.805.wNv YܩtD:8Z؜gދjL,>">Q(QK=>p?e\y0XTk12f צDF˖9 D6_aĔ>!N84ϿW^5T)``<kNƜr˅)AM&3/oW¬Ѿ aT?jPJ]Kg Ӵ~i@8BG.AQ*`^؏v*5W%+W5+= qc%p0RKS\fJO'JK_gtL4t #-Z.lG[^fzTq& d!\- C7$+ߎIE?bAWSé&։I>bӥzcn%~-γez;9͹O- ,`3DC9άW!{Y{#3?}LbVݶyŝAy1=c ;&ZDDy cYIE*,O~gcK%j+s䏥!6 B72lOu;PH~ ےfnDˈ_5/M3N7<9 \`:D65U֒=:CLn4q W QBQ^Xinp+x~T֐+lym'].5}q@-#BGߋVIp&ﯯ̘^놜 G|ƭXOC?GN+tIt]>:Uwg344ɨ Q{L |y6Oy-G.̾BE`܃+0^;"Q)SZ=f 配4Y[c[N[qcGgҵ讝Eвo}K_6Y0l T&Lkhęk$y\S3K##B_Zt妦(J1Az !Hle"B(]a*h4 J 3.# Ϙq(hO?_߃N(M|wmwZʐ^xvˡ%yFq#r;8d(>DydscTR&(JٌԴX9!zЍl&\LY|.j@PrH %OJ ohU".v="]aNSo Q"9z ٪fM'w_4b1Uf%'< 2a^eA?a ꢻ "Sss qrڝ 
3= Ք 窌* #D&l/ƫ[9YmL僂qx '-秠ȸAxRTyP>Wϧ]K N@踕}9z!hSo}3k.k>^rI@;83E騧ulav,yk:a= ReX\n|)\]UT q-/>)+œ:.À[%y ]=J,aܱHL 98oV /_ւvƍ[Y &i+5ڰ^u#1&gsh1)V¦Xqݶq\LT%_VD}M\r 4Y<27\nυ&A ~^٣NjSu7qC SǔOg~/9ؚX`; @3îڷ̀Nۮ D›ŃJ|dw1է yw4aH0325OB*͠'kj 963CgKP֊6(MPet}Šn鍙Xǁ2̤Op01NTM/?ihu GK;H+!htzf蘬f&XvR۾RZ@0|emu֥-0|Viqg^wZG!8^«Gj萾% 9\;!O{W'Ƥ 'T3Y}^8DE Wp&οI䤕bsҕtA[Q#Ft%ݻzz2N穝K=֎V|bE^2]ձ&NiFZ@K0ab w?K(mT K6.VE@9Dm3{pxV ~0XuFH~OvfU{AkFvUᲨ]B)GWsÜRVb)S LwO|,* +i*dX90ye [@@TEB0͎t}]aC(x8w'U@(jݓR!TqP1tC,h ϼWq.a҇rcTŴ.١#nKuʁC~y2xe]OJ .g({bNM-D4P,dh9M N'tPL3/7!t6S}s(}+y56̑Zd 4e#2 Cy-j { ej?6Ȍh՚w@SvK(3|A,7,@--].aHV |@|HYRAjtfbHHP{5삘ʧs\I;\'p2Ъ| 7~3=DwMN"EXw` P5꽅O+u]$2{\F5*=z#0AI+˗ѕ [Z73H~qIJRrjhk s4F<>w)gyQZl,_~$ɹ.yQ E0 cęݠd0jmTN!$m9s{ {ͬ}v=g,'}n*݀ԜL.72W<#]~/#⎇M+{9 xlX3vzQDkw;F;,=X7ob9HFH#:Ԟ퍪wTYYSNg'Cݳ/VOOqrińǴ'(ݖd)&amL|` U:qdC-.#x?.Re0;&e> q7K&1堂C9hE~ކ̾bR;J3ѭZ瘖K%o3 Bϑ.{Z~-K \c(j'1b2{ qdrOZww3ϵ\8o&-G5_꺙8a6VD^޵bk3[pbd+{r<h>%F'w9Jl?2A4Y_7ONYGz(K܂1Y쑕aGnjp#dPJubk$jIoU\)_MU|Nj9B{1*tQg 06VizGtOzot-A覱T&imΤ6&Ąf27Cxm2Z$1(bPXժ-xd$P2Ƴ4<6'~'23c%-F+*6w䝁8^yNA+ophn%6O9E \3t*̶1T &Aع1 tnqɞIaz.`P$nUj( oGu`,ZlD6{0+IA#݇J6pK}l]f鳳t'm{=/ ڮ;iƉJ;9p2ˌW{FqЙ /ڄo$ke +)'CVZ-|ا'Ǩ|I瑈Y]L`UJ[ b+i V]3(\Cip#[T /, Xm!pD-ɳRơ;~պ.>ц `[Rڌ#F00^;>=31y n3Jt :ɪ}3k L‘L!dd'mKN 1(]yfwf}rOUL6:-=bi \z3U&d=zjD^t%WZ^ Y7EőPq~,t>8!F2Em*x6T{?P nuR*Vm+!I :[g(A۔M=" 7[кG O{9g8lb4:cͻ,-4W,d2N#_ړ8{e>  Ji 7nJ@O[7diߺ| GX#EK<>Yg8MO:|_[]n_B2Yg4F$`LRfz_zaUj]5C6шp)y]9r' yt%Da0d_:Y>\z1)/6fM7h]+C P(TQAA{^ ԩ,\6V1/=z/(ܭ?뛃\U[MPrеNp4ӮvFu^uiЈ9Y}q2ΫND2M%ArdyjF||7`Ur 5Sg0(RE9$:& )EtMoչ>@^TG/\"k[aH +FgHT%a@¨N`CսISDl,\ ȥa gH o6xEr:a%Y2(֠4Ь|x|0/f%bl^BUra֤5p"e DML*ͬ${|0\]߸7E _IqyHezb+L7~88(|x1$y/fŎV\6}LI<]Z}xoXnqӛAożHř0HQl ;e 0Pk_,IVssl8of#O{F\MC04B'?G%&-[=WZyT]T_`EN{ݚ,w1 lvČeux,tS#n-U_3<~Ƶ>1Gsޱ7v VRB% L5\tۯAbU5H\|+zZr)#7;t}9!<ዤ f$;zBd3aB@ `,di> 7f-Ѻzbl.D oBDmmZ.,}JK 3AK(o7hu°ɯ%IY#80f=q>:Oܰt_.'po7~>= |π"T}{  0ʒpHng}2;Auu )9^2L"\Yl;1G>Т7إbrtf~$KKw&pj]ü,,%4f'F hL_h bʳKpxIh.>W)Mؖ%WЃcJRϊ;Awʱ0dd=E6>/'0q^tS0#uxk8ͪYqj$y921+C6BEER%OKaW d mYY?t,DiF.LzҊRVWS̛D)N:;Z,yIPY`8ik|Tmu54 
6.nIYjU݇®`X7TdG[h#ϥ ׆((8g.ɘ%+D]S%ѿu`!+c~A**+%ws'r[v;e)p[YWc[crAl-{/&ʭL#%].r2Ⱳ%q{e/$냅Ψ*owA 1e@y`!l+=1 ww)D1-'K\51=EeR no#JL=R^ -~{ݦ df݄U5+h A̳xIU4M5ł\0yvD#8;Ar9u^.lp"+E %e F-Ҥ1 mɒl"zlڅ+g_ᤔ!H* [G|*Py\7ĻgnUgs g3Jl$8k@adMJKMm(2XếmWuz/j5OIa9&io6vv_#a`P-0%oDHO_AYDAE }"[h J1a&)":'|W'qr׉>}w5Oe!#Jo#p+wJ" =A2shlb@Fh9{$7$,n-'dAG6w-Q1ݬw` v2gfw]Q%LuJ=DP̅`ٝxz *E +qp-g#ށ6Ԍ$J2ֲ|-;*;<|(Q' VsѦˍ0|~r)j9b?xgg^t"20ZXPp0p&8~^ #v\r$e"4S~ ?zm~9Yj[ pҩ |eH7ntR`g4e19%4=g"rI|1~Uć \Qو>hClDE/b& ,&N?6ṳ.>mK '%i+!kZƍ*:\jΔ.6R/ſ>9q!tZK;T&pwj*{p𘷊\oTۙ2DGMEϥ,ۉڹ ",n -kvLg{˰3%~yMx] #3͢$#jcLM2ÎՋ'~q'|_$ΞoKVx^>0?-PTT r鳛nk/N:zsڰ`^Y#M|Hx֢P֥)_AK y$ҙ r pRpS;D3"}(oe^|zlRle0ߺk-!)cLn]MMя8"q|y@'Zæe*3|v * 2kV\嗀<):з.,XNjD =OӝK$QYf*CSABT)͉}t9_ 8"[qHmA_ܕDA 0C@N*5Yڐ?m`G[,*u)c+idR/ 5) d#?w9=K93%t%qCT"0N]U+קZw ! h2ž/!I.ٟ)}ⴂT"L Fvp[*\Y A.yN{͞@A;Ty!tw*|W `![;oOMVr'VI@blB3<rgZS\/2̎(Vժڜ0Bܲ}P<'dKFB 4?&NyꩡS#Z%zͽ  ?q/'4*H7ƇԀݻoo_p}^/y's.wST[ v t@uhd}]2ĹzAD м/{/>po! FsRs$m6 Az^&DIc}]wJI.uVzEgR{8a 8Ggk#2" MtjJJdK~AGEǁ;mp7YS}ظK]nي 9O{է[I*]EDd6z%r%V!nޡ߼d`0|x&]{)oM9=dvH "%Q1_b\1j qOu n󁻺U2(y$Gܸ oFũHz0زLpplZ펒#v>]Xo}C]:йqc7[% zzk{B"I([̾E<&!@KZ$HaC^d#2됹 O'eE޷J jkŭk8T%S\~ @m32*8\?Nd$.ײrI 6D.:?׏tJRKf1eQlж|K%`Rj_s{ 7(?6.2m Q'Lǫ8 ޺]6 l?E[24ћ5$9bN '/\ IO9dx"{Y郗6Xy}iT*9v x&xYp_5 !=]UtC:@%C, -5(,%&)ǡ7U"m F_d~^d7k5*k=%d1an_2BAtħ |}o49A>"ԉB)V|9q F嶔G˥85[ Jg7#5RYZeeG X~,>B׭'s kT]@( RDAdNeYhʱph7qO[8G ;8]aRF YBkCe(*-2jG+넘 Zmf=LT⣆C`CL4n2R8ī1ؾS"D詄n n~ p Ʋ:|[; Ф@PD=9»<V&K1yNPMMPV*z R_JoL\c|>C_N]QFܮ0!Ph;Pqf*V끔hH1e 6nn2ZF2KBJ5-kwݴU$yؕRpz|lv3`f#`y]&_q zK۪_ӒgV2y:]m_*|vIk]i&m$e,j2{IO"W\ ᑥ^ip=k A+m|L`r:]yL Jv/5v` 37A-ޏuJo$(Oe+e]-We={ ca?aă#f~TCٝk40Ev q Q,:jmϟ{ReTs$W5  Jל iK Ъ] S=hwZ9eV^IIIiDU9}ʤ F5?؜iY(3@BCc3ғ!S]#W;tE50܌ktTcO(dOّ cϡl}7A*xՋ)M5GvEqC'_[8!Xgۨ?;Y=5c 3, y0 c[)К|A+yP𐷰Q,jS /랢+@B`Y՘`l d;6}JHSO:}|'.{PbI$;#lI,9߰ux bO]x"1x%0˧Uė2>o|O9jpxBE]b|;q 8,` 8B? ܦ˞k4xӠpBױ(l*vkteU.V` 4ZAn;^5Xdi;zNˍ(#`)s}!E(c/j Wz߷WfD{]ߙY\ʛZ*]XP؃0|3YbM H ѹpc4ZX+ H~C<{ $1쭄FѨ+a5ajلy9h/y`Ξ+uqk6hwUicsrvG)=֞^_ˮXI΅L,.Υ͇ZV./L8nB= xb@7޳.dP)L1keTb3e|kEГ@$fw_? 
xׄi1W47od 攺$6!iC3mCB}XÌ|ES zrt]r¸[έ6loY33(Upη IDKnJ mGWx?s5O{!pȀc s4.o'aFY<+.ezܘTδ;STqURGU~F"@VhUSJ[m?}M"U/^U Z97sʖdR*9'wl]Sjٿbq q A[q-2l#w ?!J߿z-7E_,M<6%I)plz%X5:j\SBFf` M99,m]>s|E_o&Lp$1m\Q_{a>"V-xIzq3ۣbL{Ykֻ&}FJ I'/U$q u;qi+Os$>F极UÊoƝ35sbߩьK(?endstream endobj 650 0 obj << /Filter /FlateDecode /Length1 2569 /Length2 18352 /Length3 0 /Length 19841 >> stream xڌP ܝg]!{p@p79ܓszY{w=P*1ٛ%\Xyb ,ff6FffV u+@'g+{;Y9]@2qc@`eef23މ nfeP`(<,,]@y@mJ` b t25(XmAMmjV@ Aohlhd!HCprNn@3ƶKcD[Z9Gfon6V@;g P(9c,z0o߁r665u0󴲳[J..c;߆6 c7c+c_ԍ"*cPldleFa@m3ڹ8#'n4ݓhgn_dnegf 3W& ;+GW6  :L{:RjvwZA݀'WXXfV.Ÿ 1?tNV=fI4afv6:b&-Y)%U%KG)*jf`08ظ<ll7yؙxCԥv{^R-@g?0s0~\(cIo ܺv@ vT UY_1hD,liLXX흭~,f-G ɿT@oF ;S{K 0vr2D`M+f@hgr;!>RNo'I0A&?$b0IA,&?$d Pv?]eW@ Pv7(ʧA&?]e@ٵA< dbq1umRVPcgS+iژt O(jG 2?l? P P0S{W[bkbM-hh\@-5fnNrX HYj%_ տ AUa *уzdB?8@@/=`?@UG z3Ya:`/L\ o _, c3M 6Ζ =frtAŻlrۿ ZPPAPVAPpE:܃NT.⿾y@SE{S`wI9=tNGdꬠ [>Ե Uذ$'gՙvi졩A"xBu}GݲReCT9=2hD:Oge ƅev=gD64[w5akBչO}|[0Ugɻ8zC`)lc|gWO^iGjܷ)P>kOȁy ]R*_x`8 .܉L-V{kFFnJkT|akYt]%Szǧ:ֻzacCs\0%sHM-jv04z#x_uӹ垼 =me8LЛIMt* 3"铸r Gjy ~7i<P;5,{2C{oAWx"|(x 5%ؿ9M0R3RCiu_n0/l0 @bHol27F]-R.L* GpE, lџՒ}BrjsA-=o(qE7ezI+5uzu=G=jjQ%eR7-(>v%lt,Z3L䮎I4Ǎ)4zض,`Ta !1:aT;{3p }?ff*cFn6`Rk-'j7qs勭E9їnՅxI9?".alu.5'%eHFhQ:Pl>ߺl}A%roEe:"ϥ\*1̦ 엮zH:vLώD7d Y^gDdmҮA ECC2#xQɂrJibK}4f.JOq`1 ]Q TdfqL#h*<8~KF Sgv XOqg*ۊj.z$fb##(!PW$ߩfATs9ۢ&zB~X8+>=dثT"Sz^hUJ(/%dW+:Fh3F` )sT0DP琨sв$6y#ؤXU9?l 1\.`kP|?ftL!dd\V>X0ըzO-̮!Ŭ;vx@l2XxI#P{{aa)l4^ѦLzLtf)s"g]N`z:!|Z\u<"W{TZˆ;WnQjB'Czm[yÐ8|/.w9uWm*1uGT[FRR!ݝOhXG o ]׽OGfLg~[m](@LF:_{9S oig"#!bv/,t0կO,n9 "P>}8;"^q:S%M:J# U#HӶ,iOP޷ F,#IdJO^F>ƴUf`i]d-^Éi!sji mr (4\r81CjuZ~}ZHJ't^;3L6sO&Y F}bj:%XXV,)܌E/wICuqtSi}᳘/`pwNDYTՖk;Ϫ঑=cSPvjZO9RP밨3SH=} ƴDnךO4NM'`!0ZQ3&Y?k*-Yy\C,MWty[EaBB{@* D$FW3L0}%-$ܬ B)l\H#0?$J 8Zĉ8R/mXx<"Cs{ =0iܛ;ٓXr0wK>T>" 癗X.1Ob\;9|9daA$*58:;iV̌B 5JQreC ddPBW]N闰qB!^ YpSc1MM"icotAmVfd $j} `޳kߥ_|kFDڂ1e ߨ ўe!BkMCvyOܿ-D:sb  nXҤ="BZ3<*sx |h0Z7rNs@,sz j(Ģvf} C~bDSWy33 }ޝ^]ZRJeIt#5/BEy!c4DEcwٵ A℩S6}UshcM $5%@ߴv>9gKhRyOS9@~Зm2c٣s>viSOAR~}-@1kѼY9!:x䎾U0ޔHCY ;. 
D1놀,$p,pfl)Z@GlY_d)Ks߷q>xa4~8je {d(Kx53%WcEݻz]JiUt Z@iRʹ(_eW:Wc =,/0feY003M"^t6O85tV㷰HKЩTۻ?1C/ 0Xv zY:f5Qpub?%/j՗&e:yOPIf /s;˓iɜ sKvcf>xiAKU>7^IcќGe8@eھ /J`$O-QesEA%L`ہ0ܽߞ5NӖW6t6Zi:t1 _zxjM 뷍mغ~ |U:“`T7iE[hfp @zDG]X7x|Tj*Kٍ꣥K|<--?"6e_?jz'֍L8hiۚGQtG: Çȇ]yby^j %;*n*R|P*>l@Nh֨?/c!a\-YޤIp* Aäu&V҂6]=M4tA# /^w@nTxIw$fit6dV5nNJ߱CaRr,?mX0ex(Μ i ;T ,|#+0N5w'~UoBdZ{>| U21T"}YUDhe]`7c\U 39L|NXzLD|Cs28P xI:!J-eoS<}pFhLנJƵ%[t3|q x q5 A9XMhY㧂AE2v1Pfs<)r&T[32b &D5gQy+<xn6>Vw̎*`3N;ҏ*n^W.+WqF%Ɓ&wmBTXĈ}[Į#ogɜ>8gNUCz">?2p'P@Ca|^H9c 3{.sN!05hdJ.NWR&eg;a TTB#:.aINYPH?3>Ɠp܊#B9FLF0_Q&XY K]ਖ਼w|Ċ78woZ!:Ģ"o#TN#tJd E7-;^ap#0FM;-pW Ӥ;XA/^.SfnF#ؾ}'[bBOj#e9c)>㢽̢VQ1z֡U%T( tnd-se[I|S:FwCd&ΛVvD%l}qZebC[NZ#tu,u[q9+bb#/X<?p@|q7X{d-b"9zt!&%.,njM#zMgw3NZ6ǽM]O;m $2 riM'<.[<k- 0Cf(&6k銍 eF*e0,M.rL1rEa0fȈUy DoV9)%PK÷KG!0AQ#!rCBߣG&K9rT41wl-| _ :!dr6ET@)90Je,-Ћ7HQpjF!@ƭSz,zN2k}o̴'HQK YXέTNt._hi6l)Uv乒h*-nC~%L$41tQ%2_hPjpFoxom3!H1Z=0Px'vm#9i <I͘33;.ZO굜ó%O)x#'_Rf |q[!D 8 ?_VY܉nꯈ*ˁ/$B!pAf臨p@LC8䡰(acqBo{:V7NM^VBo\b{fh-#;zCv!bU0_JJFnTT%ZÚi)HQN;[ >ܳt;kZ 3[ aFkC֫%نVuTAץ;68ٌ'M:{X3fnF"!#Tkek12h6TZ[mf"*^sA,}XM":1U{Zl4Q.˃? 
Q]k.[cV P%;N#ِ0igwha$β暺3i b(<{9M|dwҝg$PnXh/.`I+vJm1DZ#C"/9| !d嬘/T_U[uM5IC)kw?M&4cŇSr;z>A=\:Qz+)iV߂<: ])mil -gAQ"z8{V<%o>r7*bRIcF(ZNY HDeEY;tߡI~=mEY,~sR.).8>kXG/Cٍܿ:ŀgfe,0 &)5oyQt_.T%p=/(jy ZӧȒFu )x=l7$KP΃U.qxk|#i?8QcsΎ^+vlru6ķ"ޱ"_ mB}ytnt]l&* 6D^@ƛ?_Il{h/oh|#^Tbd&O [K9~!q<~tVH mM F7ۮQ鷢垣]戣,ot;|UL<[T2eVvxu˪ֈbprJصS;1b/LA*[٭w"5&XS"gm)*N/idLFVLJxqW&32im5:V?J8FBbI벙\,YܯrJJT1­ja&s0%C7`SFP0ʔvKE&um+R!UƲG!T{QFy1plH qŻ8wI7?+^?}Vk-/I\T%ϏyʅXd"~^9v+AA\\K*nTCۻri9 s@rUT}4saȟe1Zi]}7P򣉞͓͘-MZ< ӏ[E C_散!#Bx*eԩrD;Xھ&xtLskSSMd+SB"xBϤr[q7[qr y7x#*=-OBU%®Tڲc|-u bw{G8l?A ,ܶɞV)flͳ/ G]$GQ:;_.x1&Za( HC+OLF$I.2%n ߳wQԕݚB4 B3DFX !_u0X (=I'JR~"p܀(/־m3?a7YYWkQoP݁]-G7ēE~nOmi*AnN]ۚ5'M"S5˱yvI'ByƸlƾ=u+l ~ WiX RgzFD-"@DD0e~߮!\ i+gQ_'-b,nln;p5.afS>HEy;="@rS44,wAM'.W^OZ$?μ+56IeyFo 4m{] k7╕J󠥂B o5!~ ?;XoAyS>3W^]b0'VVQ_>‹gd$N@Nfh3b YChk1'k~=吝 R@ϋ× î 1_VghM؎5 5Z5 alඡ vjVI˓r >J2GbWIԼcϠ38,9+G4:k<a) 4n ?܊M\d6ٌH#oaHapkyŰ.oI({MDFFu= E׺k~lyy4b*OpK˒L;&Jݻe _ϋN<{1uBq5сNv./z, ?*F7cTخގ)LWLR~DB=E;?ۖMb)W]kIjHCIbcMyZprf˜Ann g7~ٸ&}~PK72%Ld;xx1jDcQP_P{BOgLzY8qO΁V <G z(ǥ)W:Hky_҂39ls >\'fn]u!t +eA;=PݹL4xnigU]JO}_|Sܾ65K(doĄdQ/ҽ>&9Xnb6ȡTj%rRY'Ϣ3ps DX-w*E ul+[F׀1ǔ+!Sl^+'ҭ*[Q! \T^jT[I#'~R%M. !Gya`o rƍ]r]ܬ%wf 5NGȾ$mf?LbF1$yѼQғE?(=efWd{&.;9eX!m`3 ?_򡆭tUQ8&\BWgh\ZLubh˸thnS7oMvynOօ֚S! !(&ϋpow ?b5s]SM,+dJWƺ$ bƝ(o~+FG[x+1|NB:5&/hH#"#u~Dnh:֖("5,I%Y .ov6m xO?+!Qf4U[_(RQ0f-%9:hͦCz c[ro8xRK$d)Z !>ԆBIӽOv&+[:K9KKpO3WvLВ"@R /B-3GpsbVx3c*]ri1et eߓO>X{vJ _G-+a=Ք\t25Ώ kB5ۯ5jtQp;;:2C l𛱹#ɳM4 pL8 ?}$; :bٗ) OeC&0BlarRz[1~5{Kn%k1aslm95UI`wo-,EN{?6 k*d-ɯ.v°VRȔԓx*[v @ voͬg1! 
h;u_y/s ڕƖa~\5Bq<4g')ޡMM}#DHB3?$NS}G?emjd~WFM)>waR.Hto,2=iy{tJ;R=,C27֜CRTL-aT~e#Jр ;Mso+*#.oV쐷}%& ]%y65$T:*wG>lXA/srE)b~5wgl>DcMLtj4MOiyMrPaoI٥j;W1/{G7#{:&a5;YJ|X#g{ϚusbT1%p3[L[ JJD)wk\w֌'g>d8z\ЫOtbzY*QPy  )8pX8hAè@[3&[ʶSVY؀KIL YU ٕJERtZzPn7?WK\ '6 q7YFTN.j\jO, mB;}))ڏ l *qjPi\c6O57 -P+G6 صL-4on7Zw_Fr5(U.=LS3(؝*QD3anDeE><fejKr[ҍe$K%=^<.J H G`z(Zw*Z,븩orJcT; -.: (d4DMBըb\uZ 忢dz}J&6ڮb} UY MZau9ӨtC"=I !N#T^_!U8VsOn7'S}y-)ejD^U2f'WTcpfO4) .Ǘ%WϵNԗ9o 6: >&C1{&\Hi#-ZO+=Pj,3}hMIMV<r>z^B?kpFc`.KiMx\1/LbRjvFDfdzS\Β)X|;"s䧟\𻭚N=Ojf6_7܅'aKc߃SLELjP~ ho@1] 'Յ0]tG8(z!{`JDkbU6WV?\ 47UD!`G.Iy6Npn ޳p/"+#{L^B҅|рī[{[ ͜m5iyZ57bgu /W cg(̇oe1 Qn[xf1a93B8>R60,5wc-dz=ggx#ĒXΎȏq.lHjN멗Y- {:圍ќ߹q3$|Jx~;΍I)+de$/7~ ܋{2op,#"u,dB'lظaf!ka"iFzuO̻K)&oj˯PT w_twl+ Xo̹ >w%%I|ܷTav@;'[mGg4Tr ח/Zȇfm{MgO|lT*9!uMBEfVj% !8Mq\ A@0ʯvc`S/9?Ew.hF:#U&0y =.Jmwj=ID3]KC3ĚQ|Onm ;8W=`Q#^ s(:f[k585}=;,UrG^ȶKsJqkp])'5_;B NgxrNIXM" ^kŸ rw|C]c7qG$,:QցG?p^U}h$H?25F&tji-?B%W6HH77n@r/6F5_Ihe'@e 2iIj0х=_xbC8| /م >;TK, ~d^]|C=VI ױkYSA+<4U4  z&/76ZmXl]Yl*%C2$3^ NI2 %aוYjs@g#+]IB0S^}YOd!즃E ( 1=UDnb {Zà: c;fRu`TyoO#\ o>;?u%=|%xB$Ύ-">97f&0iRE0#ZXsĥ)QP^B'-?7.:$.1<}D?^ RrWim{ ̆1x1QX>/+ޯ=[Ul5tW+1&{r8wPvL?#NVfp<|жƬm zp% \@1a5:uo@QEM6GIr>8.:pw'GEr2޾(VMЪ$wqŮӑbSTt{זƅ`:3\f>ky*?EE\!Cz-&qXU=>IfrxjtЀܘ"Bʍ鿒ZԠ~?5aH<)Wrx}erB'|?8aкsz_ 8&) ?M@S&=tP)DS{o )H6eqc VwZyG@@y{wro-6& >}Bh\@̇~#QO,^8sӾ6̸֝9b<}'0~'L&+]a$QxHYZ{ڛ\auQR52jlԜ#$p'0M{N٧qŮ](J*EDx}l+-s =wڣ{Z#65͢hHk7OY!Z|ڏ´1%BPT{ql3-IJ?XOYBfin{0 gAE zFGtWh7! 5gZhtf@e' "T8Aoj50Mxd6  1mt l1 aռ_@NxmKw}d>Bs91Qr}Jl!\@GBcPW@y)Ϊpd©aRxw;*_rEV8YYD)U5G^r.EשHOI0_fb:Ux["ۊ4 l. 
"_؆mr"u@\~V=f3pKਪ\6P"VsK>]S'#A#(x{10 is|̶^V=2cM3M/C9r S+lxDqEx=bu7hҀoy^FQ#ż>)EDLiMgFK;nVߕ E"b=I{Ow'*l!?U^R!Mؿά]+Hc$.2ِ}Ѱ'5wSjH{23vjy ?i1՚ o i.Alk;hA%'9q9>b UP X sxv3~"X#l6K6 _5ԧjQK XԡlvSפ0xZNg*o_f7p( )=sGRP:Vu:ɫBkdgL_޿.L?Ip)%,N5&7*?e +~y9;jO TgR h-ݍ]MlɾF'2'o;Rp1dɕny"'3wt"1/#_ )3oRJq:W4// `6#`w> %7*@[sN"(O'O1V*jY'ep}~t7i0endstream endobj 651 0 obj << /Filter /FlateDecode /Length1 1876 /Length2 10647 /Length3 0 /Length 11812 >> stream xڍTj6Lw(!!1C!])ݡtw+7s-ւvYQkJZBArP++'@ZE[[AG vcƠ9?A@WM SBJnNn''CB;PB@.tPG/g+xeg+ r[! v\+WWG!vv6 Zvh\@ K@ߝam.۵V@gf[ . 7%; PsAV;ll)OB`_@ #X 2+ w@=s @NR5O{.`GW6MYb) upA\]0~;,`cbf P? ݄#R'ff rrppps@N ^az;BV&@`+ puvzo [A`ƿafv`OL{?d %bo_.#wIIA=ެ|V.nn/OPEԁHp u_ ixprX~q? ?}&$fo_'&Y7WU%P=+9o+k"Y]-lf+fԡ.o `{ea{7\`z( Z/.^>// Ͽ `g@]a)Xw+3 K6`"A v_o@N;ʿz [@6@^;f2ÿF  qs0-akQ0? J8^I=_+?ֿ53 S6i#؝k 46'uªap&M_Z}/iY9Zska;y,0!u!5dtzΝn8IՙA7I}ֶe_]KP=zkm@ kKhi9ێ4Eid :9Ğϣnb+|]z>ǀg`ػ]jט?fXugӠR1y]L>Q)1cGsynqͿ]r!%1$@:HV"Z.-]`d[IYܰQs/2]OToY¹ \~6quz k-ƳtVWxN[!u̲W;ofsn(?C~ʉlaR!aEyٸ@3\F {mֽIGmq@%ZѹTCC;Gu= ZLH^;#L/,{ b˱yMXD69쪼^;ǮdmICGZ;3$󛲦#FO7)0L6(m涎:;p(8!T!u&nB w(+:44Vin;oAؠvkIh֢f&s^T{GdDhBtd?3?|k6uݲ ͊_6&eR *mR{/C :6 htuds=*oY$QM`P9SXwzSq~4"68bNtjb R눘"+C?|% @êmYJ[V(o8Q,Hwo!J7,KTϱ8y~!Fm[ЈF qD 뱛lw7d.Zy c+UULbW18Ʀ_`y5ƿSn o:kF{fH9ӖѪ"sJJZWs_¢5;*ӊbPa# ç2$j>nl&_U.I3=͎x o7,Bj8N.? S8 M^oP"P-_Q0\cpha!)zSي#yD.wuD#J}B\=&[/~:-L^#"e_ Vʪ0AYO: Z6֧I:z&M\aE.Q%C溵vA2'J98veF G5%q6_“ч@)iTVH~T_sa)^_}>Y?Y Hsv/HeD\_>9BO,EHYr^^]. -Z2JKސ,j)cڄ67G$ (r=!WcGhXMKe]hF>"ߴAwRGs}E1P1t:;h_׵l?aV8#,5h%Гf8ʹƑR^ȎFO;%<{=S&"gMPpYތpF-7%lS#)/zbj!!MpZMr0w(ZZseb81p-|ջ:Um?2Tޙ;!~il"d$6F枩swCx+866Ŏ:]>$k}F`[n Oץ9$ ~݇SVgpDeq Tg}wCp0~0b&!Us5<\,K'+dCU74=K&\)IXZq{TkOd (U8A#;ed*Q$ݴק>_W-l [k(b|} ypRnok.7}j$Oa͵mJS2(O[Bh~4N.I X\klE*>~=\OvzWW4N?>'Vގh/z~r$+4Iӹ^w_f!p! 
Ӌ|z[L!>hs2\h]51MG ܂YAM$3u7[pm;m@Rp!Rr"ʮsV3N)ԋbׇJӉ3P0{Dކ7LE(.6V^tc~fq29f>m9p|9nTdb%/V2jd(83!t]j/bsI9' N_&+TvoO^fLg^hF/FhK7bZ5kg59l wfa6-)b΄B 2N+U-Z[:ܱ״T},y*(\j?u@9I&Ɔx{fb' 4wqgbza7h83>W)RtOMVATDz4=_4P?f ʲ}ģ137fcM " ǦDVR*kSبȜ4iƥvTj]oG(ޝ/x@.Ka$ )wT%"3B Hx^ƐXzGt 7U zU_zϜ{Zcڐh5cm`1hHӿvAqSjNyƷޠu]ӷڤwR0U,/fI)YzX-]e*G!4lH*GF<slcD}?d@06E;e7Qfܦl's$ {}HV nt3 㪳$ei̘r?u۬x!ZYY.gOhiON\ 4?h ؋EIֺ0K;߶3ۙE^Q`Yͅ~{"|_N3)dwAbVAg ^ Իs?r`9/n!-mI yL>yk+zDj9"|gns8mDkZ98/2tLסXN:WS9d{AS& DCs>'m-vI&ڢ&<*N \:JRjs6-ZdfdO{hxVՂaNF}G*RhF1Pdk g#fdrlޞ 04!DՃwx< p0 wV~5rLWo,E>x|$F7Rup3n!6I>IDG;܈lo[NZdd=gD,v6ג㘔{|ɱţGf2DeS b>$TGm/h2WTF."2|BժIn @4o󒱨.\V:h0vjtcv 3/ i,P%U dALVJ$kNfgZ'} S?HDގ63})4QH>n1__F1AdFw{쎉<%*{jȉW&Lu}SA*{zzevUеP]MF+v{̢܅2plOf~ߞTqueZ%L}~kU`^@ ߗ!ɣDz<2gf/Pq2 ub&S}E0d=|k˶mDΧd,7w1JZ9yLaWM1ťT[O+>zߗ2eyff1kl% V7C;_TJ`@TюR/fǮX(*y7/4@\`7iAʫhTF# ^PG8O+`y 68;*.HajͨT$OZGbpbk[xx=;M  ;t埓d Qz\׈.%-٫s"TjnbKsU|SX/B#-Bb@(ב'@վi^F)6 _ oD |ݦVA+/rظY1K$g:LÕ31^ϞYUH,QRVKj]-1c( jjeb.ى'hdwv`Th0N=##}a #(:EJRЪ7]s㜧vϿx1?OE$`L;2ѥ7g6XY=+/v~֩u!jh8ֶ~Z(aԕX y gwg‡fz% rĄ7 ]EF\D8qtŗPZ9ZR L+^`-AE:ڬiĽb(Zk` 2Z^QχwN}SV";겙E,z}ҶuU?lRЋ)ᡆ[>/fxl:{DpHK3)CfIM:C]%jF<շ*V,^Klҧ%IVO}KLr[GOxkOpvZ[9q?6[w*' P$Em"{&Mqڧ%r52/(cm!)ϑZXgt ]er !_I^0ihƻvA޼˔5wU;g9%I슄u|} åZfa{Xpu~kz?!߲'qlD| 茐Zr $p(a{EН}ʉ4 :4DMu|S%"#2O:,!{VKe> 9F;xX5cWNݰl-iϱK19+ .H {9&&G~|oBfTl:~37!/ѷ^d fz$s&ajL2Z=O$3}p4lYVf퀏tH,83(yyHu_Rb\i&H:TϏ¶WRKef<^yCgv/XU Ycۘ7% *鞯,@ܗ8CPkdxPpJ >^ߞA؆Ox-mH'^F[V;Ij)Rķ%ci 3ѤW>HؙhK)1aJT )H@ ;S0R`/3Vo MqL'URR}4Bf( Ŋ㢴.\5hH1ɑ,etnYf`,{'uVw'S$?|.^7{C$& ^!uCϒ Cyoʣ RYf2$SA3uRAù?<%Kನ( DBǃ|kƭP/Wo7SUN:NO18ծd$pXA7H_D ˽ d̪o 7SwqTU6\c߬TϬ&Wy;dJ[=)e^vZq-!-Q/x%/x]4V3ZcW/U\ .C1m ҏyop1`0 xRi#PM2?ONi~^<+Gl[;%$s_gU 흎 -? 
7,?o4@^>HEXK^%9t!pCnTM80q+B޻jO-!6S52qS +8D2Z, h[I/hxq1W!jHSŁgG7֍{GѮO}oF+18,njNX"FQ|M"e:ӱނ0߂) I6c›M/F`H(\[T%V+g680,R6B:Xhk.襑vA?Uq CAqe;հiύ 0>wR##O]?wHMhWTRۢK(~GN@a:5i/|mĴU 3E؂QkzY?ЏXɔ+M0ʀcob=IqX\*.hyZb2*EwZIڡ@p*&4o(MhFWcol, D&o'Zlj叼Xlƿbmz sh9NuG0O{4ǧGtP0)ˁ5]X(٥xpy@%ke6K&:7 mB&-"x²\ѹf/q367~Dޔ[E$*00V-de)i"@k}endstream endobj 652 0 obj << /Filter /FlateDecode /Length1 721 /Length2 4672 /Length3 0 /Length 5264 >> stream xmrg4ju :ѣ D%.E13 3ѣN"D'щ5DF^7]Zz>쳟˥A!0HDT`n `P<V2`pb 2^ `@D!c ȹ*➋`+\7"=`tBTʹ @F`N6NH@ CqA- p'0h8oM8?Ю,Z-A t4x5â>_//u'!p$ A!dM m<?wt-w p f?wrCQ t1p 0YP_z9 $N醀#VB- ]O?ڏcN;z?<50 ⯽bP? \""X7Oa#i|žc4׻9$ #d |r o Y {igKX /(lok} (V{"B-XOΞuZjuӘ'OM{$ަ,}'OίmE3;1|KyzI!TB3`eda0$3;6/3?=KqrytnEGu2rHtn%MbԈpsڧ BJ ;`e`FX(8WD"Q/]*\ұaRƨoV@~CM…bԙe3'3'>]}TJT!{QyŦr؞{ } 2%.Evpz#J, Jc9u}-*;\pf4ѫ&wϯ,3o;!@ LGl** 7$WWpYQ5Ϛ5# o9-ͰEq?sHf =R=]q'b."_{88  8ixxs=e26R>-MԜy$l$Hr*ReK\w:(_``M:ǦBԲmhR@NP >ѝU%' 13atLjgt4O ")<u@VoYA38IG 4_?)o~[u.ᅬpLw$,ttQ[ \6Qb})Ŏ72K@w>T8~5,N乁c-Tlv#$I2<-fJLZ摳lru^Pd<=.m1MMf+km(=[3/71,(m}!\.·ڔe=D{ωM^ E2 !w/3+H6= M4A'Z,Dƞi*s\F. ONޜՍ 6 ۹,W!#%Xfo߷90 )!Us*@>i}ޟ|Gv-z C-d9Du1N,tA po%ǞMݩvIeʾ&Ĵ6flVk;;v^-YlM.#&l^D3 KYOhlu9ZM:IQtf\jwwŶLaG|-;+qm@٧ N4 8$ZTcg3-KVn*?CmY;S^cyס8'"R\R.E(/^,j&Ny[뙧}x0Q;>vdJKo7f>!ʏs5hr\TesnX͈S)lY,W%!%?b:I9;D>b60*/꘤p&8y\/+5D 8ǒܚsϩRXKIHdݢxN m& V}ih6{͎Q z|yń'<3reh;Xy3E ="A`.jbZ_+2f%vI^ف7Ҥz3q|Po_-g畈 eWGߚ&PJ/$/32pDqDwu&:`O#4) =lp7X\~\m+r-]hQ"eG>xTh "#Ud5i\*!' 
xAE@}oU4gnş5Y,tl:/IZo8io'"v){gdXߟ;ٺE+u7{</&Uiѝ*v|0l (kN1S#k>w?{Y9Ay|'?8*Yf dW(jP ]~:e!=0iټ౱]PEf-|ѝ6%~R)'ryhz`v,z5bphѵ1[$1ʪ{Jb~Կ s;_<9|9t*ʝX|Jy~>M۩^L(ݡ ֣KHڪzԴDjt³ޘy&m=t9+r[lS3΄QDgy+3f^x_hiޠdd357hm Oڻ;=F!}7;\+9n"jqK5T灁?"(l ,A]Dn,,fhaP)Feɻ3o52i@{;H8dg%lo VUÜ{#gZ#K 2f}{UZIݴzEW1M;7I^_w󱛍^1cŐ=!mendstream endobj 653 0 obj << /Filter /FlateDecode /Length 2480 >> stream xZKs`,"3<]YW,8qI $!_C~aPƇ 0nR|_.tPU@W*.eqex6-:j>۟zef[ GƘo 94^.X8{a 7y~hE$4I"lxX(M`dҸ,e[P()ӨS@=5o<=xƞh IA׎NI=uWlHE就?Tn#S,h!Sh'$h̸G\[]6xey )Xrۧ:ZhLK Z{JE_TRM &PP2iWȲٳ|[#;:3A82u#Da,"'rɭ8 +vd`Ʒ;؏;l}!R`[6bisKĥԱSgVވn -^F@rς[ylʊ)/ٞpyGRr&fM/[yZjj/?O.mR3˫X)8/Px@]lB?~ `s|Vuqvμ^wֽͫgڳ[P6|Et1}_0Sc{\_.y36 8M"{ЇIn m׭ݮw`3:1q\< r 6_0OJ!{x4so5+Z!KQߡY`8]ҹ[:_aJ{{f ׎)vu-뽧܁*VK'0gm/^nYZ-SqI "S{|xL\`{e=)ZY˪5v &Bin&?bxN1G3~{o;P86^Ҧf(Y =! =®> /ExtGState << >> /Font << /F1 566 0 R /F2 567 0 R /F3 568 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 60365 >> stream xMʲ$4?€e/˞v Z  F -Ժ􀿏άʈ*6 ogG㏊,Ͽ3??N۟Ϻ7>|Kro?y>LG|>l)p=.js|>yi/ؽ4}[jb~`]FxZeO? 
~_^QLuc-LJxUxyZˉ)ޮ 3xuR6ѺFet`~s:g wL4޷2FOo~B~󔯩ք8PQ~~H+yI)MuMwM{Bi!k\wJܶ:wnU;%FqFW6;ugK6FwJ;˕:yb~ne_f| yOyO>"}Bƿx!մ؝zbwU33t334zr?7~9Fs"_/Wӯ󯶟gy ϱ;@r_=DpA׿Կ v?]OZEub'W' ]ia(n%<2v?soVxsc:+xާ2ר Սbϓ430,g&ȍﱖ2 q JJCN{am<;a `?8 )4r}6=NoARp wg+<'wE5:wcs>ݱ_*4r}$URK`?VNcNcO^pKi;hn8OY 8C^m }H@\OusB9yuXO:ќ@0 (\n3 8C^-ޠ (<5v߇ NsUC jU{>$lp 1MOכoMYC=hB s@pOUwo4, V}|mkڨфX.h>=~sڟ"|v/찦KBrwbwG?THYώ[Fhws|[B''k>'ɍ:3!3 Y>{V1P/~CQ2$Rae='I0:f !S Tτ B&] >?j17lќ#ﰅRh$iN7knc&B''k4$5fG㞧)a 'XgI+'LӜ"vsp8|A(tr4Nrp\,uy_|-Bc='I09Χ˅p*L4,NNc Gs|Uʚ)es_W^GKa9EpO8 >{NX!8aur>̹W|[Cc?hIvKW!akҧ]+t!aKo>$l|MX>$l|MKT>$lp|uqϩ{=c*9i KR!oy{ }H@wozAg6Pl߇ '?yvU)l+%"g%8/s}n/c^dT$}37@79SJ(,}@AKQ]2SU6Iݙ r#V:p=2N̎X% S.8++PH41VVtkU.(uVEbiSXYcUȠ5&" b4)1֪J2 G#$N a{$x ({P y`+dpZZh/ @STd}ZmjB ۅh҂=CWH BZ+pH@!-~8$l ?6P  nW|+'SpW#lbUH/Vbjnkْ>Ez'#5bEsrRBZS4,NNOrǵ<^DǔM(VV礯qXƜB^DW8 BE~4jNW0r`-ٌ Z (e9h+N\WEtL :9/=tK2@I)gINƜ|g\Dj B''罌":.R6eRh೧$Bp4ɯE;̄ ¡I]iI)Bc='I0:)'9>Mgp褿HzkOٔJJae>'LcN{xn $#d`A(trk(o/{o-2@( 'YiI X2B',NNc5QH(BF 5 8$l%( Y2b=W4ayPX=G[*\!)^t(sM  I6PH8B5d8'ۮ'Jqe8nB7:/[ǧr=K0~B'dcjiwSq9ׅ'x)ihH ~p4 iwg+x!a8WᐰBZpH@!-X}pH@ ֻ[C qVᐰ㾩k3_|ahI#֧UǺGch]憴`93҂C iگp!a`WᐰBZ+pH@!-Xn8$la ܐW1ÏoK5u=jpS iMٯp!aQ| r%X><Ֆ)8[ur1Ū\'xJ&^%+XmA{~Xj+OuARVXYώZŊ4ꤼ鋋2P䤿ĸ_ױRZl+ sQ'.+aʳL :9,1-an) 0%z='Y>8as]_0K<B''%ƣ|fin) 0%Xgo}8[ Ә|yBZ,ž,*NKT=,m?e[+9}p4daʳI1Ú+/S`JꆍWCp4/@KY&X JKlQXYw,V09Y|_\ BXm-QHK~- (e 2E!aJ_C y-Wa3r `v#{[K& iiد%!a4גᐰB[<֓i-h[Zl!XѷԽƵ]M,g]eN"Uz (\B΃ ~/Ms~XZcN"HVK&~Z+K!< .uz,Wbo]s[pLٲʋn+M~=Hp]Au=»^,z9:Õt+XpI`a^41\y׎b"y9H?\yE@!.6 2\yמ~c"yҠ0J,, G~KMӱH@ mS>8/Õ{)#Y>?|.1t_v/g뱂zF?sE83Ƌn{~2='UAGta]RystPS aG$X&+*pHaH@SC,HU8$0NWSυx R6N፹*+L[ X8$0%^_C, BK4w2X>vf9Ok47$1sgHaY)7MKgꀧN* >T%-patHP?DżU <&F1/{IQK~IQ ~]IQ~IQLk$8,y<&F1/a<&F1/a<&F1/a<&F1/b<&F1/5b<&F1/Ub<&F1-:1hsR<]jKq[Wf4Xݮ13yZJü_uɯTzz.PsLyEدP16yΞ>7+<0Ǥ1{ݵy?DSq:/y-vGUbNqcҘ}ʳVP>́1+䩿 9} y;ʾsOPΓ9xyT ĘNO-Y:Tg^qe&OAzoV5ibn{G6UcRܘ箧S/* ן@WfsUi8Fe>cRHӟg$fsUm8&Ao[:j' 1)nsSQ"YoS`kUcV\sUK9! 
,Ii>~Bӣz@ (pv7n)]1JdRO@Q[r`Mi^Gb}U48;ŕ<4-s]憤N 7 (BpLcɭ,E۔ <5&d~ќՉr[&1)f5HPԕgE褘_+ӌpL+3xyUye/N==\(I1I@16IMB IqP@cmBRsϞ՜$#t"Y1;IQ4IQ,3+=3p(f8&F1-167j (4pLbVCB$(Iݎ<&F1/~THW5cRܘ箧ߡ/ǎƌcnx|gPhJgLc Ǩ]J:&WaBiyRHe_Nqc{}+|ՁQT ƍ<ֻ1GuAMl5i#yΟA'IGKIߟg|t7\ 4XeYqQUWf󔯤_7GuT'Jb>tmYq#yNuAϣj%ƤL hKZ-;W< t=s/˴uƨQU=C+z.F LpeB5912ZILY`=99~_cm_cmzO-k`iÚc=ќǬ DŤ'$8 A16y}5{vU %1Hjy̞Io\sUyӵsżDQBit%}FWSܘ箧LL˷<>Hsjp̊+3yzjͼ|Ǵ OjGz̒Č'{^eGe`}8;ŕ<<{y\Ge ż]=n|$==T=᳗2-3UxV^-;W< V3UvTFWS\sS~4=2Q>[1Wwj Z\:H]_zZ7 bPW4ߤ_Tݍ3xzM[̤~_˟:!4'< g2瞧tE~ʢ~Jl%d1/+@q<D^bL./Bu@OAӅcb1j,jT=TEPQE LJHG;]LUјIU\&=TU-w6w B &=tWBa2MW\sHykA %QN= T=*((!MsyFUo8[e(NUeY1 ֿ4瘫z1.@q ǬPoDŤz*17I8~ǐ)-bVQ` hUP^DŬ$(f8&FqU^mYߧ<TU8㱲bb% NV5BFCI!Bm@$ܰ*e#uSo`Wl)bO׶WJ1qvd"PDŬ$(fT%8&F1+6/T%cIa}7:y]r01Ⱦ' o-ff2O(:|+סpio1]1u~¨UQUkIi4}tvM#c@qF!"W[4'?3ޮBгAgcm7=b+wOPOt S(hzQeGLYY5IQʋgQV+ Wc/Ɍj"ٿQebVVzSSiX1*f%8&F1)9167J?pLbVr@cmhPDŬ$(f%8&Ѧw-meǫA9 LScB9Ԧ<&SFg2I1)9;OxDŇ/[TvhwM LSc"MSBLrIcLY7n9n15RĘRHPJWDŬn8&F1+^cmB9Oz9Au84xQv}V¨aQSјYx_hiEUjsVVdWy*4um1O.fVOSbӘ~tqcD&=Rb):0fqds⦡!~)z#3sbٝ~cn b3Ĝ-%ƨis8+#2ܩscޭcbn )LjLv s) M?JO/I`<&hBp7=~ܻNhP VUr>/AUg-z'1ÁL1OwU  1aWAcl ì?7ì]WxLaACcl "`c@A I1z hpLaACcl gS7u8dj2?9Ͳj5Xoc2c ϑa:ع$T|ZfO;bdj2?MCcq6e0s`Cgz.zʴdՒҪ!Ƙ<@_虲oܘsMĨHb2c ϑa9ع$f vn1 6Yz7#U6҈MSsy4gLc ~1Õ WABǜ?^K]P\dGOVIKoSZSZ̦Wk콧l136uZs^ d'\g K=Efzlܩ30ezŸHجΆGueu"gy2%b,wSοpg{3c:5Sg`46q[=hW]p+$V]+zs>MLzg1VxtR4*)>XW-fƦϾнқ2]fa5-2+i>XW-fƦ>+MC2űf+?Wsp*鷬UWPJW_DŬlxLbV6$(feӫxLbV_rqW,<ّ_P<W>{aSχկavI)2 ,oLc~)gkAi yNb}9c<M`|m'W۬Sd^P#Sir6]Ic2.-怗XGRNc,;56mm/"0ki]}vY]l136 ]=uFT7n4mmfjK "4g:SY{YH98mm/Y.+x̶Lj*tSK;uFL˚>;m풶XLO]RW@vji^gbTcT4l*4g,19,+CyE)ǔ}c;<$7Sߣ$"L_v;S{OV_[>+M&&}7_ؔsV-xfϞewG~1IbD&;Rb7vJuQxc$Fdsl?/vb ĈLv>3g1{K1*nJz|1H#2̜,%ƨ)[y]~c$Fd9[JQqSnb/ Id3sbS2ăNN1B'cbTU*DJ4 wh0)Qum`pæNxj  7ADIˇ܈pЪnb<&FqӦ4J:XUobJ@L⪅:oÌ1I̖|bM''1^x{f,1fU:DŬC'hU}cmpL͍pLK * <5&d~ ]LbR̪>c ϑȡU}I1B+ ?QǯRgz05O 4ZA'kιj-{s$r(fUcm qXg@ML*{ĎI8nTvIQ*;(pLbVAcmc\j`ش"1xfcb?`Cc1z&;:WaU{y0|g ":{:pLKc,~T|S^u@vY]l1SdFtW:Jb]=̽@~O`y =E4|d\wKҩ-|V[ݕuK#x~>bnوyOT<wJ:>[3GRmz;kØGުL[6bSd.1oSp(Ɉ!Uu5 <Lau廁:#~~-CS{EJ 
XbA>bnوyObxi\s>Ȕh3_?q]*GiO>Sdn\s^gdJ4 kKe/쒺l13n\'32]Wpu\??i.+x3czj}G:#St[ι.M?i.+x̦B\SgdWt|\d>?GRAwع:Si~n&^Ic÷ zObxk=;X1V޳oaө!IdsOZLk,GO'/@ ObflBi}Gj*SV.CAUcg欿[JQ13g1{KIbq9=o۫Abd3s^f)I7]1XW;g,fo)I7^O01QLT1u <&Fq8[oo=: xXB߲$ܰ|kWk&Kh8"pFa膁oY16kȡEXj7$bT/DaoY16Cwhvp3`s!t|)xryP SW8pLb^8&F1`/hsmY \4lL8NVb10IQ|K@g6 rOu2Om\ Z̵Ǧ\5E| Obfmsg+jSo>(=7q첺zObx+e>932oG9Nse=Ef9=S?Uy)RΚCW^2ueu"o{W=u&tG/uun9BoSZW]VW)2[̌E\RE[c< v?7jljϤ/(puո#Sy-91_lļl`lP}psg)5V{qRgZk"s[:J2dף>۔ 2*fZϺaW9 J)1_lļ\3^QLVE\mGR_/~>N+6#떍50Kޏ(Ɉ!U#WDT7^6zluF{̥:_vz:J:b߬CQ]Gu>> ЏsFuI:6åy3.Mn;s5Kt3J-)2eFOX1ImL'\Ȟ;ZGZgSіW^тk2+QiVbɪX1Vttb|Ob:>G K+bSd>=}s^QbJcʪq?+N$#Iπ4.:| Obflei}jjSV SVNH=oz ;IG&;o)1Fž_} b4Fd#qOVտu^W=\>قl13>WǔU­{}AUNbĤs/A*xډu>i1H|d,%dԏ1QLT1u>4<&Fz[P[(n=Wr =X/:LY-cT\U_%&;ESm[~jb7^jzяz dHJ45  &4`pUAEGvȈ( " xΘG1Qnd8t}B16k-OHEOXǐ)b 1*1H̎ s16 $(]^EOXLj)_bJbT<1H̎ s16_ {D\m4Lc;f<'PcVpLb38&Fq8Y*`٘gs1$(~.cm{o=al_Ab_旙ƭjYstf{j"ړbnوyOzw_GGlK[/sf 0+ Z]{EN&[6bSd.snQC&c2|m]~EBĿ/w ]'6B{ !TV}m|.Kݗ|aʏs3FMvF>N;B<^ӺזKDN\h#υ)2{?B d:sGHL=lݜ;3Ȕvch]ϷX·=F$ Sd^zcf:WI:2uRbkeyL~seG=eIbq=7r(L1OݧJQnKt6BiM5~Oޡ?$󼳞=_tz^(Ҥ w^ξx0+⩘:=዇pLٹE. /P3JLSQcp_Zo)1Uµ;9gnjLs<!قlipO;׭ Ѧ콧l1')j5y5)Nc.XϹ^z eu"~"BB$֕|dΟ']7Z|mݼ>Sd.9l˖y}3Y5^7Oy)K?wwokg5KϛY[6bSd.9s2NGQLV օ<Q! 2Z׵)~Γظ71_lļ\tD}%1Y5[A`J;d4}Eym^t=Fhu``Ĺ9 ~D=GRʝJA\LVg9F:;s|#;ܓr<]ĽF( Sd׹,dtdn Sy[~9]#$\@&3c~\5s=םLi]ץ'y^Gʯ@bq>ds{̥:ӹGHҹ0%kh؏$sjOұ `J{Ӣk?g8 ~D=E>N|d. ՙd:vKwn Sڽcu]Sq,K_91_lļ\3oS:gj3o$A㏯N&Iq~=v3eOLv~L1{KIbq뫽~M. 
IdguGo7BzP\}/z#} O\UKg/BvdLv~KMq*:]Tbfl]i̝j$L\ $_՞xI̞9`4 Ml#0ALd8m_5cmڣ(`M50qkz}M/.$1[bSp%nxZVm>zq} c$&;?e$1<-HclL1n÷=ufkۯ,5ߧ<\k:*g 4 kޮV{LN4 OyB _$ܰnKq`LpǨx@oL1۵pLگm!.Vtlc4+Ӡ1qvd8o16kO`*:fdž8Fû㠻1qvd;[IQ;8{y;{f<'PLح $x>~׺D`) SqI8s1t+16~%ZϷAgX6f19 bĎ$(N`cms'0t163~|0ۏab^wނO1l),oܺ^ꋶI'U{>[nrA[Ab)2[̌':sQ, D[vӺBvI]SdB5ڮ;uFGc.uKF VȔZDz_{|9l|$xy42Y5^[\=S>ȔMꬸ gBwVw$uE0y_d#sWaΰNܺwU4d')2= Bw볕XWbj0纷naa!ۤ7n}gN/wzuJRWdҘ#~"'Qzu&Mfl}Fy]}v"CRpy1S5 kpLe=2YkgsuW$1{ŵ#ycNCޘ9{b3f[JQqH.uM$HLv~zp-p>ǰ Oco!ق\NQ=ϾMՀj(}~5c=1YO~֨:<21I^qH.:k7fΞ|d,%x֡|c#1ܛu * 1[}Ͻ/:ua)1*e]}/H1k|cT\; J[i̕c$&;?c昽׎򚱾1H9>#5K1*.X8&b2\7E9X1@?-6%?8 K;vc4V.D &4 [KT4-~wj4iHCpnYO ߪcmN]3tv;֝:fʫ@Lax2A(;b2:Dŵ{b~wLY)Ĩx{-^Abvd8tvoU16SsKizvxN¡{ҡIQ:Ǘ+;ɭ}A ,Li61q׾Day8&F1C<h{}xmz֩~sv/cOSL׆3}/ꞇSd^J4S\:M_>d{塯ނNLS4ûu{ VO|T~~pV"Urx/ѩqf?z}v-enɈ\rHG2Y5g3i2ډ~lZHM~#S"M黊d.lx_D;1xn SџqGrsgnSƠ/߲QU_! ~=Eg.FHLVW~{$Ȕvi)8!4&Sց92]"_o#K55xJuI2v<8O@_I}߲>)xC hc\92|Ί|d\sLSŏ$`b悤3չ2''%u4>y_+gG3*g丮E$ݯGYLnr0s:(K2**pUԙ_ȔY]q?CG#:x>' IFm,u/hVd S_%j̝YLY]vzUC(3nk4[̜t'ˈ~>tt>y&s[pHo~MGӊNGil=Ÿ"u?٨{OT;XH: 2+W|$3wZbq D1#cmSif;^k&Qqz_-$|dccTvʧ-$|dccT\zKյb!w-16kZp,3t]w._pm3c 4 om=pH kQD1d w÷DP &4 ]< u>0 ǨxAwи(;b2zDŭG/rgŎl3u &="s1Qvd816v;1s?cό$[^Tгo; F޲)$&Qq!_(;b2sԾ$q_ L{f<'ḱ/|!2{w̔ho| #&üv(16g߰}loe_|,ۨ<0$&Qq(#c$[dcT̥ oas1"̃{k=Ab)2[̌msu^N5)bkyWvσ{-.+xsntL'|ݝ 8 H=EfynLLӛeCj-k?a쒺l1<ϻgVY)mc<ӂ?q_|T"pxV?N0ܗSwy#Lʀ]g2ZcyyGDu>)f=Dy 92[5`vޛ~GD+T_2̝Li_fpOi6ڧAe ; Y>esd.w3`k.Qd!9`vuҎ8~]u}]q߯gk?B\"s{#$@&c2Z#&@#c2scގFl]⎈\?w&ʬ1?|N7a%_Tg7??)o*O$y7Ag$B̑隹 sG9O92|d~!9u6 ,_p>"u)2ʎ/ud*ضȜ͊/QG&3g5T=uf0]ץBߓ} yd.<'ߏ$sFLҹ1îQb<ҹLu=벿Z#iAG )2k?Bdufl;6FsŏtFDuվ[ŵ%c7%#"$I#(Ɉ!S0ss1dg{;>۬7nҔnl%+2]篳o"XPWdj0}u;4:/e٤iGG^o;z}$xzU}3QC&FiD>۬7n]G_l%+2]k|7CgK'uE&;? z1I^^{j$.!Fc瘳j$f_/3p5|LdL3cۣ\S `ʫc]oV7]$#|v1#2̜-%ƨu^xAg,fo)1Fŭ|z`s'ob! 
֊5֓}O Y>قl1= ԩb@Rk7q}+tv38fȞ|bNcx ^<_uC c$&;{u$ƂO.1Sw{DCw }o?1u&Qq3GU1bd3s~lcT̯yӱ1H#2̜K}_uC c$&;{o)I7^>;MĘ>%2f=uǨvO?>e9[dy[|䙓5+lMG9z_t{Z!M+Pp[~lh@RD{ (}Iqso>1Q>e*^aHܘpw_KSѝLi1V _t0;f2WËh٘sgs"+"ݺGx޲IOLi2q˕fLSa_Qg3(эyyžω);߷NzfJFH0/zfLcbW36ˇ5Csw>Z;)d1V^]ABvd;I1̝8&0wc|^;Oh5%o>%Ą"S|q}~W:K[C=Ef9`[Uu@o1Q] ȧueu"?w=Tyi1k>oeu"[=q:J{D?wX\_Ϟ%+\%B̑i>oB;?nqsd:3n%A`-ywWɅ;B>}oTw9 ~=GfFCI2WpF{@ڎӲ*Zڝ|]~}u^;ddL󱕂2s:g{VuhIf!2onBgSփg/;;us"QmmJE7 O~=Gf;] ~%oT hK+/bJ,uDZ6o292k'C:g{̥+ Qd~UG??ҝ_Ȕu8Y#ržLˍ{޿:ʪ#sd*ğ |'92_%zv.Qd~!ՙO? /J73! Wuvxr׏+R}>(g{Mvd=~%_dVl.B:+t?h#ELQ W Hu8Wi?u췻#Sg@)3w =GRߧvJd!9`0~VIw1S65|yr-u:y.7y7!Ig*u̝ 4WI:>I|ή)ĔX] ^Kuo<ds{Wހۏs::GHҹΌmh@|}q?_:΅ #LS$#LV ƶa4~?J],S[{rA&aSd.}o(d#(Ɉ!S348b'>۬7NȻҺl%ԕ5owJ,~A+2Y5i)Vzد%tuJ+3foM"O̝j$ֳ~sV$!{f*I[dT'N5ZLdcg(uV$!{f*痜x?CmgLv>1wH-&Ip+7S왩ܽA5N Kܻ>ڮ'4/1aTq݅pI=2as8Fm·\g$|b<aw`MvJcn{0bSbu8FüWxHcFu1~`c;.<&ư?b`gN<Ǩu/e'|~<>F|f\nhu_~ӫ1H#2y&nb4l}Ζ|s_12O$?A cbcl UKLYc4lם:tW\-3ĜӕVʟs|А2hh45(>xE|8$}ʓ ?w]iԉXxBYBxg ^tec=3p(SYhl'4ob 1*ޞʼn1H̎ ~ugQ6黩aw?&؃fc-x3c\ϠS `\KwG<>>Sd^^932wWq_pD[QVfhA^W]VW)2[{|[gd< oww֕^^G?W*\%|y-1p0"SVt͕֕~c ;4 ;92}?\#92[މѳ1I 2Uesdj{A8K2f f%*:`JG;%5 ~=E _GHLVuG`I:7}^zs'j_x6\&BjGt|ݲ"s12Y5>шSZgmVT6t~>[u%+'mw|9Io+7>&IG&9`N#Wj@LWܺg]G&;>o$1"yNs毺} c$&;f)I7٫ig,fo)In]'w>1`4 o~ F`ɰG??[-8Fŭjxٌ1I-2ܹ7K1*nW׋fA'4fo)1Fŭy+6?ucsds_j$|b,?ޥzyba>u $8beN#qUՎ3x/']]vJ:Fǣҹv`FdjnFǣhO7kĜj+y]F ɷg}ˈDcťPuo.0&b:cO88~أc1G؞i#irLqR.?jzyO4lmOi!FZce i5JٛGcLo7G?cz/tpǸ.4Vaq}`ऐ!J7ÐI@3qN =8OADBǯ߹$`:7bjxa/3ywE`J@LOf>\hv԰e7im ]9/qdmǽ/qJ.n)71Ɗ3} 4Ύ:}r`H_v?땋c5L5:izD#/qripk= t>E|H%=1BDc{8לiL:fx3qԛ>W8rGVW9M5A&eniVޏt52N`~xА>fRY=^럭Ȝ2忒Ug{ņ['sҥ|]ui˳^Ȝ32\ᔏLjNoi1t]ui-oXG32nnF~7=mLQ{/uL (Q掽}y-=L=민T|s-;giӿY#>?6ˎD~k{irSگ7\[΢7z7z53+S(x[ˎDu=XA5=[Hí_ңn;YCώnYȤLm_wzv|ŜzZϞ&OMsiqa} ti{&1+Su<7eΎ)fVW<]G%2u|=x,D+Ϗ7dJ;|/T'F8Rvډט׈YgLGYQ3+s[ ƭ/䨄c:>*uU}jXúkOǯ Y2,(֗~rT±`QL?c-칉W>9)}ߣkNaSZAm}gĬLX(>;bfen!OJ8,9 ++rTQe^}y`ݱ)=ٓcaX=ΔzqYrl S[ އ~rAL{7s2՗8ns:cqݲ=3)q$2?{ծe nt2Sq:W<:~3lg]oq+25͌7y{u&ܳJ}eQW7nGw!g 3]GȤ7{yLԶ't5l5*[ՉLgOL?|GgLm{bNW#jd^>Fd:{b*'+0;KfLm{b M?P~h0-15p+WXޏ!_M%=3gWt1~='3i$=3 
^Lた?;@15=ԱLcsOs1cq41B0T,LcNO\:g/B゘Xޙ8eq4ΖĜHK4V|-{z!&bz~㻜q41wn^3qܻko-` HZ0}޷v+8dPaobȾz6Zc&+Ѱ P48)q Øi?W9pdmǽ(m- .UbJML{tDv)&15&=e: rޝFLqALqYЕV&S۞sBDců{YkL{5އz㭔Nbs#bVi[/Tȝlq$х:p$M ǽkmvbk=v'> D)61F:L4;bjX;#a4U+=uiCȌ${(t]yy1&&X1O ͎:]kr.cw֝#pȔρX/zէ}~]͎.H3qֶܻ]lWz rL:0hYk]127͂ Dg#str<#ckmYk]127͂N:tRpڎ-Kc=`Vzۄyro=3+s[ uڅ3{/nЗ-mԳﱸa{]A\WK(2uNigAy&:p|]lNwqh̡#}]{d81}ø ǽ_D;~a89)qMWnwUoëOh25OBv|E%w+xXraL8Jkpz߭oRq.}IW7_8;257|bv|Ž_Q2}8`&0]<3_/0+S(x[~XvlD%gݧ=XI~'r7ouK @zT4~0>-z&ejgܺ㱐uD;'9N{R{7`~"SZoJ?wNcadR榙q&H<7 N i&+ʊC&Nr|Q;ZI%"=VĔb1ܕtv԰tF;p4q*vF]ۂy2D/hvD}9"tS/.`:;b:g0f/䏤3BD{)[f \nL4;bjxt^JD:.A伧 9kt^A; iԶGXj<4F&DLG~~|B_D{Ȉ)3Ɗ{?~wtԶ'TsDcŽu1/E mO̩iƊ{x@Fbj#/[.g:ԍ>jd127͌OtVgd0g+ѻzP7h.[טIf޽/uFj~[N{u5fRYw]/uF:j~wpO轹g,~ței9{0ceŞ)߃ȔvTMgó[;:ۼ!0tPv,|½/BOdϜ+Ĕj/_3IvuFf/7Q0r*G_>_d)7/.ŝ?e;= o6>p4zx^܍}00+S( _I׳+*QG?[B_e49ހ)qM[~eQ#diQ[ׯwr25˾#cٱ(s_/Tz"{ɱBLIk^t4{zeN2jz|ΈYFM؈{,;6ec1`41w{`4qw˅c#3}:X:W{`4h r0SϋȌ$K iL*hX9Ep`m}30f{`oLcs iLTcd4V<1Z讙h\@&c#8fmL9*Ӂ1s}:0f{nOLcs ic>3q=d)wG9g9tJ% i|EO'jܰGd1ΨG>p2-]y25؀=D~B'Y7N4;V)q]~ @}z-άLMWI cɱJ/~ue+֊]_ky~dG6<*25[ɱ{,96@2w|j>i& 1eZc =AȬ?FeggQd|R]Āod~d[Ǘw 25[_ĺ=%Sl5@cpfXLAijݒKO޽ }<{{LcsOAici>3q= c1 *E,=r0s$?AR8ፖs3LNu&4Ad1LpS_.M{*^V3g1}' 2{4L)cـ>3q\V#.qzL#Sg"uL+aME<1LFǝ25>7|M=#M591nc{8\0fz6},0fSl@8 1ܳ}0f{6LcsyiSGZ=*o{8X0E8fC&Rc$T(ewJWŅgSENkA͎&3q<(>2L*.|@?u &pN+4숩apWwbp̗{~io…K S ~L .uPUq" JL}8> _:uF-2yr,|bklxϥi=#]D]WzL rLcXcšk o85Y2[d9Fǣk[.ݮK4"S۞S1TѵqK4"S۞S1Tk}V`Fdjs4})t@ķW>j0-dR榙qDYi0Vs]3wgMz+dRYckѦLՐkܗї.Y_% i>ų^Ӓ32Wۮ|)\H_{<0d!2w|_~ѦLL=kK|z 3ažxj{Dž. L6_i~KG-И[FVq9_+Pa?n9L]O<gHm+BI =$q$GdV/#aY4zJGE`J\sg' Lں6W2)SǷׂ`'d1nGnh8X7;I:ov ރyƜ֖=#okA{L= Lp뢸ЫӺ$xB)sVo?{?^ }/w<6B)|n9#5gғ J̋NwfΫEN L}{Gz5zo2O5;>m1ܧ 0fLc;{־OL0L ?z)&zHL49>1ncxOpby[Ȍ$Oc1G:,<3K߀$o 3q_So0ރkg2@FoC6^Dc`36ׇcic20f}|a41ØiLc1Lc?8f3q>>x0f{Fv(5nWM߱1y:b'L+Ѱ 'Aj! 
@A2 Pwx8NxB_D=|bR`کV͎=Lc[q/@'t5:._ǽzM4;bꘝ3qr۾x{=(۟9q$G(Qݧ_ޥrTS:™7ߜlL= L뚞M'}/FͤL}5wGL+^+=d1G`wO<6W!2u| 8h:A ϭK4"S۞<5';eun/#j0-fR榙q X.0\<+h_ww O5.[טIfOxkcdיkʦ94G\ ܟOu._בIfpk#d֙NVc&Ŀe(q FNh?֟3+JL4?HVqYluE12w(+f'm篼Zh[[p^3?3)S_73ryѝpWyƜ9->w_(-з2HYF)5N8AT2c|+WU}:cfe<^O=E4mZ!|U/x")qlգLW뙧 X{ 3Ϙk]غ=w›׆:b&enz=JIݸE~PP/::jzUU4Sڑ{gmV;j:&qssuS\av:sH0c&Ƅ_%0}@g LȼU NDz_fq̄ۘp4.0|Lbfc3i]`pa$av91Lb&c3q,.080f0ØILs c&1 14.ptOϝ򕃰^wWݙfMI[کvT߷Bԝ xN5 T-c,+h&s4$;]e]NE;8f=icqó8f;-NȹLL`&XOݗD#)Ϣi oxccs&t5:V.}ƨtԶ'xh&LmO̩i?n7xSDc{c1&79G,3qq9M5A&en)#3{xQ%mO陱jz=ps=D߯B&en7y:Ǟoyը]P>#SĔ]4MR5G8;C)Ou> .+79&nGT4Iꠏx wGLS?ZztGuٷ+)*gߴGy?P ~m_ L#բuՠLs_>"9'&Ϯ3O3FK~U N@f̴& 1h翼r%әFDkO{3MӪi:!nc}.8f3z8fx =yF|MЙFKo;xwSߔ]^SED]:jDVn3ħ:]cx0c%+}%4G&S;3eZk5ktK=;#%1ZsYS{GLcr3q̾wq41+1K_=u>#ͱnL܇;gyu#F-19 hkLf;8fzQO5#SzWM];_ù}phl!2 9Df+fX޶p2)LLkz<8iOI 4ߚg SoxYf̖8c,ߛ)*]cDf*.YAȤM3O|Enߗtn0c^ɊBfep\Ɋ c5gx]I`VΌ .;AP2cN`|uK:g3+s_ '}#d݁)u]Ά f|OПv74ם2ϘS7#;$ ,ؽ׽ioeRٽ uu3k[`+C澮yʜLƟz8;j@)sVA {g>5d2g~V.pys<͔jMj&7&O7;huxiy̬1{g1FLc1ӘS>6'+=;ri0!zkj1#MS26S ϼisGf8Gf&X< *8f|Q3q<_]g2/55/F&8fmL9}Q3q`)q41{p|Cq)Fw>!kScb71oc1Lc!8fOo葯\Й7oo!y^ +$|8dP!ـ! œSa4Ʊ8[T3qܝq_n9Ȉ)s$+at 4Ύ:&O|<3q< c1i8QGsϊALqw;^< LgKLm{bNii'o;>[Yj=?w9Cɺ?՝ 5=nΞ̽T:CsfrTZϻ8usūg̩_b4ĩjdžkvC؜+^gP2cN#{|`j7C֙Wc/"yl?N_ Ԝ#=0^Wǝ2O3Ցoys 5jٳy)*fNBZwY|!A_/ylhy5(Ӊs-.8W䁝̾i<s<͔jM=ՙ΁V5;̔i{qϏ:uTg,XqgLkML4\,ך{9(xzՉc17Lc3n"8f';3|4jS3bqM1 c17Lc?|F7LcSwM&:31nr75B)X!&! 71T( 0f㘝Apa41k,8~!P DΝ:bJ>1Fݿ۶҉ Lcd:[bjs"-X'i"ɉLc6,8&1Ә9w˜icvKalǻ8UTdROR12\p.<',шLm{b>YףS|hVSL1[^M[K)w7 ~ eyǘ?7[A{?l 2)sLL/-ׂWs|5Kݵ!=dRKwK|jY!r@\LptL4;bjpI`41;icv<.E:>v\\y$4_pe 2\19~9Lfb:s++\3q 20fpe`4gte`41;\ܯ|oe0+L5B)n]m] dW&TH& 2q$BqX1찀+c1aWLcOFic (3q<|o!sңXKOjLgKLm{b4C%+>H}RT#19#-xỼKKSԪkrܙ!4SVxmg=+}PޏΈa8$|8dP! ! 
Lc 8f|3q&0f=M`41{L8fɀ1Tg2`4b30fkڱ:޶)Ç};D iTAM1טƛ1Mud1ys U5[ׯsWh0gV-f͎2{^*ݯsͽz!9zd RĽ{ުǼyƜKF[z)V]GW2ϘzG>n'Rƭ滮1d>gzG>륂16Y K(aw8"$ hPC&J0f+P1\i,8J+Tga41WZ: c1 Y3q̕V˜icbuLc*J+Tg}~U~Wc{Erp%rm*~!Mud1yslU<+Ák{t_4+gcdb2n5Ffdʪi?4*ϣZx+l!2sM#͎ʸUsuϢ~X+{ux]uuf=d1զ 9RYe|'5UQgՀ3洲ꑏF*6Ϋ̫ϙU+\Y _|?:3d1uO9/+|<{ǃ9feҠj*{=irͯ_;Y|hyu({_ 4&K}V֝W2;}V=˘'+<[5GyW˲***}i~õA,2jѺP ھəs1U>[-Zw^ tRu--EF+gk<[ay,6<~cmXcY96&yo9> VIކGW|Y%y1k#ncQs%y1m=6&y5Wyx6z1VIކn+ՆILr^RZ/45W虅1 uZ3iXj:m_x W9g^ey|R[nuZ0@uywwL5O^]]knuZֈ'eӲ憹Ӷ GT& ZsӲF!BY3_V=bRMmU'j`ySfg;RzbyTOav:sմ3 Zc&ǃ9fg:v2}kaCr4ρ2T]8;zʳx0߯eVff>Ku3m(H1FVWYyU]DžDzj'%wܙ!y&˺c^9ٳq3~3ӱB./Ԡj&gz}ZX*wjjP>m]*{g3Cy=7+L-WKU`0SmV0׏hY5jJ4Wbq_% 2*Q/fyƜ jU;1,iͻk&NKٜhux餎tZ1k>^9̐<ˋ uN8[SMu;#STȌ.V#T3}nxlH5բuՠL'5 UP{6^9̐y)cFL]sށΉWG怙Nj~U{gW`LY3j4eLPy_VyudxcY{gx0LCLL{^Z΁V5;̔ibY`WcRl-3T7rRd˳sL#Sg"浣ѫWKS)X`LkͣJ_JOU+d`L'G(*|_!L*r>I@Rˇ?8Z>a41c13q̵|ØicCLcs-0fjX1Ә3q̵|ØicCLcKm}]OϺު˳&ǝ2O3x]33LpJm:90ZsXc_uUZcX N<(^E7fwfʴָ<~dƊ;3eZk k,^Lof#%+̔iEq_KhGfXqgLkݟbc5Ej~l &K4Vܙ)ZsD6Lhx0cv~n{E'59&/KqȄۘrT,<c1 Zt+=S%qd:1/&~_-z$2f̴<\cήb&e pSwTzOeW"ekai{݃FعkDftVNמUH8mq$B2iC&kN8f 63q̮8m0f]3p`41ficvi1욁c15C Lc* 63q̮8m0f]3p`41Zi1lj3""1&j~NkSԁ͎e8fWY>-^ aksk͎{yj5 V◎ 8gĚwf<ϔ9"Q)S_Xn~%;Vhudؙ[c.ijsܙ!,S[DLi?#ߵ~Q#sL'δ}־E qgطz-gkiz=q4,2 .WcTS!ItՁu;̔>(-tqf mոo{3jLhFe'zZ}p_I@dL*'3qN /0f㘝p_`41;)icvR}1준c1ILc T,N /0f㘝p_`41;)icvR}1lݗk?Ǭ,(maM;3df:qffj>3`vV9lN:2̴䘝+nϯU+{2kaMyiP7؃ⴰ͉V@֚Úsq9n߁̙&ǝ23e~Fh䙶V&Jī#sL'Q[;9<;ǝ22Ch28Wtxudz˸ 9%³sܙ!"yQ)garBZf:x[ ^gϮ 3w0C33WeDQG!+̔uï.L3fߝm gՉ,]yYӒ] |(?iewfʴܝ̻`= ̖hL'5<3Nfߝ 癦աJʷzdL֚{mw\zlftR-ߥ ~3³80Lєi?7Ah5XL֚So.tP,XqgLkb;~塧&g Ycŝ25w'5DOM4ML֚%wJEOMWhx0cv77"M59fJpȄۘr8~ǝ&.xlٙ82L׎bwXk+K2kܐ5x0SᄱW9n2͑wfʴܝ洝_͏HK4U̮ԫ [od22L׎bwXӾN|u*wfʴܝ洝kg'*;mY=x#Lw*$ 8dP!g! 
5 LckN8f 63q̮8m0f]3p`41ficvi1䚡c1kN8f 63q̮8m0f]3p`41Zi1"W7KYi8[qgL52Lub',ρ25w'5U,./q3R\HGM5g59̐y)s`FS&ϴ_NXv|(ZswXcv.|+~A?LcwX 2o'j4eLfsNMsL'>(M/fߝ- gro'j4ezVuqgLg}~T>llƏ9Y|ub&ՠ>ɘW֙g_`LKb|õz?FT3e:A?l9̐n2.?3SSV4G&S;3eZkNDcsί/>]~1VC&ƔU6:L脥x3 &b^N 5V:*GDL֚߭'q6ҫ{Jktܙ)Zswfk kۦ.,3]ܙ( Nk|w8yxz}ɱFǝ25wM4+`-^5:̿{cYe9v;x{x%^2 ,9I@ǁc1ُLc8f?<<3q~xx0f8`41qicÃ1ӘŏLc8f?<<3q~xx0f8`41e1lj?O1'>w{,)vgᙱ&ǝ2O3eA) +,~ylٜxudiyxl1S\Z|ڟbk{fqdrC%LY%{j`5X֚F|Rm`9lT3e:q3>rSC;}wxvs<ϔi-abK@f̴<<5ؘ{w"ft|^Գr:}xvs<˔kMj񭩆UB',ρ2ݣ=t2+xvs̼ȔWt<&&RF?kv<1ZpHccx?~`6LΪFHwggx0JWLUʜ/kv<Wq~Џ:phq+'bZf^zj|5-oUf̳w<1ZpX㻔UB+L5V1RspXW92jt<1ZpX1W92jt<1ZpXW92jt<1ZpXqa!:_3̓T֚]):_cc0jt<1ZpXV>_,.:_d`Lk cb؈Ϙͮ7񍐬`Lki56Lc4TV!nc1wo)WF+|g)J"2%f̴<\cs4HSMEI|8Qx%vW'Fko8QYÉ"g.[c3f:އ%{\eW2 ,+I@Wc1ٯ Lc_8f <.3q~x\0f*`41Uqic1Әů Lc_8f <.3q~x\0f*`41UqicKX0f8o݈IMw+vg)3dfJєez1=AesՑ9`A&'p4Ʊ)k{h邰33gxX33eFS&Tq_+Oles9P԰&z,ҫ@LcwbX33eFS&{ѫo3es9P+*?9}wbxvԥW'óshf5Ud9jfǃ35?4:[^09 sCș)Ia_} q6<k-az䑣)f̴<\XŎB槰dAPxo)92jtܙ)Zsw=X^8 Bwfʴ]odG!SXsdD]ʴ]ָV; 皟ѩwfʴ]+q2?E hx0cz^Od`Lk`[Yb!SXsd2x0cႨ:kL+i\j]c9if摹3Q]Hx7Ɗ3fZk k|c=_WT֚klΙi1;$C&Ɣ_}/x srL#`Bj^0l_۳T(oqCcCŋLbf/3io`$ab1ًLbf/3io`$abп1ŀc&1  4^ 70f0{1ILŀc&1  4^ 70f0y%Ic&{+?Ko\%3O3FWf޸ W֚Úf$78f0{"Gɖy655J+XĥH=wdZ$3ݜ,G筮DȾLƊgZknΉjoJM &K4V<1ZpNXm®Dй'2hlx0cᜰƭԥ<L4:̘i98)e Dhj]1nc]mkq IdZ$3dwJ kLƊ3fZkΆj,J|;J|ML46#^kIu^cDhj;%$ǿo˿JYa}un{__FO\Qyez{gw wǿ??mǿa/w87_M4&m7OՐw_žWF>Q~}'? ǿnڿ; ۿǧ[Z5Y. 
?t_m?+?tWKg; O\h?˷*]קd:2Um}r~M,{?ߎs<'R|ş?W{z`?#7٦=V?z܎׺ݿ?qs_{9r܏WǏԟ̏*{i?Wi+-_߸2s{9C}JcR}~ߏG9h~e~g'M۟D~Yx~ak[)]V&*gl=Oq'e~OSc=˩q?k U7%wˁN?c8n_ҞoO=JGaXh$)~K54g";`x R>meJ[>,7߈W2wya8[(Gqc%[)_(UǷ7 ?4ַ w Q)omx´c`u&zOn=9VJvSrGR>Nݶ0pܾw|Wi4}Ghۏ~m~ VrDmJVy0lVQ} 8nw+Gy]uE_sZ};#^oW}}ݛ[('awnJ:<9߯k9S)[ O){SpJiqw+W)[Jַwp }^TޒyWLZø*rL>#}xm~|ݯTzo\ó_8O,O}]wU7}k'ǟ-}kYd``>rt?,G⧟:xhGul?3.`-Xr+ J~H95=B/8n_.VNuckx;΋:Ⱥ^qa75JݛgJ8cB^؏&|gW9_"׿TvAGx8:0lsNgftYvcq_.>{Ǔ=ǯ'}?&k׮@ |ŕvwu@*4!1 &Ovf%dheiwf&µOسw朴z^I}%_jԏ4nz|th,r=WQDՙ$'6~ϊ03Z+*zzVPȈuj# x\^ˊMz|neLn.b HECD*{Q֫[f7G h|-|6c-um_}hXP'Y`MDE[#Sc/RRQ&釳[#[C@7ɔgj-hB&I/2*4z1m, m$@YV>p =ky1YEN7-=IctCh]WqyY'IIL&`x6e6z/M_ۚɎ#YcY;h@RnT @scwH ~DŽ瀄 g34VN H*/2-z"ɬ@fL,@@"kG0&COr [;& 8qLF\d|sÔx3QcRR'*jn-KU]=ѷԐ]sᔑHA2C˰_W|.cendstream endobj 655 0 obj << /Filter /FlateDecode /Length 639 >> stream xڭUێ0}WX}J&x\BխVۇ]B(wCRڗ MLf9ۃ !sQDER1B,ɢB'b2?|h$ ** hfq'K'Q0'FPG)d."\.3(@e((IrQ[97;ʿy_0OwT+GbCϙ/0UF6;gѴ:Ma po|F寈fK3iT]m*0֔ߏ}U֐Jl9ر)rt]Ѝr 8_NlZ,-uGUGPu#T)%endstream endobj 656 0 obj << /Alternate /DeviceRGB /Filter /FlateDecode /N 3 /Length 2596 >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 657 0 obj << /Filter /FlateDecode /Length 3628 >> stream x]s۸ݿB ׵Loͥ7w&=m+DU;$3}Ebw5ًo.~Κ)M9Fe5=|R)[9ߦPX>EoI2S;13unv*P2^TE 9Y_E*\U *4:+jY[&ڞ v0- N=DGKءø45Mx#7ocUOV7{(2Ts6"σ"ڒ7-#^g ၔ.6//ыHi#dv^V%! 
h xYS Zg׼=*^.H`@9,q4 $'J3Qx ;l4Yq^d0QjC \u#҃.k;k^]]va-9#-.l0_!病>~WJKY@U k0(q5Y>;2;_Y$PFs(tt뷉CXdЃ 4"iY,.EnqG߭ElwV<|b@1;~tѹq)@,?U)ۖlT1PsP|6j- (j5ih=>w$HQCe"Z@U44Y2O#C!W&ԕ5ORE9T!$XRt4*FnEԒ5->TϟGC"j| ̩膷1%zJJT1SyK%CKՕt Љ]CB(C7O顜-4$T5'GWN0zvOvYi3n v%GG 8rg1sVNE33%A\z㘛O5̮^"m|=~qowlYXP$܈휕9Ѕ-s%!WXΣ[P]驲p{ q;'Ʒ'>hc+,u%**%}~e5&C]/)ߊ\G ($kOu` _K XJ~=NspBFp'ClFPPѪ@hy/Ob'o[3cF&2SK?Y7*E%Gd<,}A-bSI]$qxPPe ~|=;R |w>+J8=C݇MsK| I5ۍ(&8F]xBh,\Ar8VjJ%=kH8 ɋSc*o*wcp!SuۘN>hB@׺DGt_mNq aNdu IOhcʟ+OʉٸMҀ$yʙ} n/.V7%I83i"©nOAK:1]JWwģ{"\Qt o)X}c/mR_j\c*x)L\2fϱÈjꊏUAKڻg lnZ{ʶaA/CW)T+ގntz|O9Uܖ!!PQZ@ 98F欌tBr%'oW'yp#5>uOY1Oʊ)卂{J9$vž1C{(8Ic"g)*3`Su@cs_E|ʋs /nOl*K^:z:⌥T4ѹOciSfS>6IGثF(Nms]ts_~-mj5nLq|۵;q"D'G v Ώ0kK"SaVE@bxZ1sW '5xBg7{x6Ӡ%}&}U`B KqFľ&xc,Or{:v$J%n36'/Eo#]DUBC3Ӕ?^]BS!g14Ylg5[ Y~Ang%; 닿%:LQ:Ёx=_@UznO^0T55fI > /ExtGState << >> /Font << /F1 582 0 R /F2 583 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 1954 >> stream xYM\5 ϯKX4q'VT '@ ,R9ṇ̀O_&~k3庙Oʦ&ۻ7)vُ_v9^NON|'8~|4YμJۤKc{}5V>?R)4K\MLfv$BW٥$T"*)S,rw!|Qj00@ bS]iBxk" DDumqiuJ.iWi]$S vA聻2^:R9"O0`+sqS:ӈVjtdFAPftF d,BxмfIyDҼL*G8LJsit|W8{Bs[sE@^ 39i,-s5' d:RF79_ k$> _e_U{cjWԴ@Tm***,"G"RCB|$u2y} UU$kAq`$Ps}W@l _i҉ҁi 'rVGW湀[,TZ+( r 1+*5ˎȶ@_ut-w} GW@+f#R,]Q\imzRR,,]T뫌^כ,|E $Ʉ`޿66^\g+Du8ñ0t&+Q+zw TVPαm`wwcgmx'RM܍ Hk_xPm pl$¢׽٤l!foYGPOJz Yf&X2wE/>[_9ئgPZ03Sp&y}uNPݯ: \y}u-h.a}'/͛E ݯ_Ry{r*a:OQ?ϲKyf/H {9 %avvN8;l;8^dqHx hY$3Q.`G3!--m1hbJFkeKru]CĽkG¦Z׈ }akȴ-I< Az=\)oYM%]}VCi\)y(N12_ XV/(&h`+&URͅ Nyqҍp ]28i(V0I H Q4/Rx'7Ϡ}̓*1)o#[Lþ}U}ئ0yM[9 ?oT|/GlpWx4GSh1t,;y٣; ӺxH󧡘u4|U.ש&[o|fu7ZvFX14ܺyͭ ovoa^xs_NIjCӣíi]h׽WD Z=zQԠᦌ^+:gζڵ=zaFC#+=z 5 g4khrkޙ54ee_naK},ٍ vX%*ݿVpf!ݗ՛F3fYf/yS{ͮL#v,kv]ǍckpykG|gaK5TW kvCxM Pqi?6JCendstream endobj 659 0 obj << /Filter /FlateDecode /Length 3547 >> stream xn6cVD%yln~8R׷XUH5`,5U,~P31⛫/h*Uͮge]4]fR]~ߕe$*j|Tv]y?߾YDtYX@ÓuZ#H)DLQڃ MԅUC]^6TVv0}OvmܠA^.jO0ݡ@$o̅`盫 /b&^if%Pr^qQFVBD/تٷ?iq."9@Zjwp"wGZ,tEYqG9-YXr10d Sƕ͌+fr.G /J!owN+fY~\b$G0R4H1~h-wp6vHV < ؽ'Xbu(U q_ZzyT" 8aYC*|CġJ!}r)dYƲB6DlJ[T{'9杯rW,n -=l9wp7=Jm7<dz~ω 
BLjˆDagz_BNq(I*ԉŒvoo9ץ#;:řiFCY\$L]OJ ]eiIn%)JCa 'E~-iܗ7GSKs:1W^W]" j5ZT%ph봴"Cvv1 R&2p:8r VEx vy_W^|<ɬxbHA9ryK0L0lS&Go X@=ׂ 81>n0/\"T%+Ȇ #x$KW\!,v\)hީeXVr{80#hq89LwƜRdcAFy'5Anh-{} o/v*w(D"$ |1/XepwVRptY#{tܣ..|Ex0+ǐaXvв_پILEAe֪xGmkwb}c$veE?(@0pR)6tSqM02@09[2ƹ^[$1~)=wK'PXB36%vqL׍?S3ǎڐ::6'qǙuu(ЩCzV p#vo ΈQ'U58yGW kǺ1rJdWz"u JTTҏ`7h2vrޯJxqgiz(Yzv*Ɔ.m?xhH0= #%[0PLCi[61$: `[f +uYZN*9quX('% ]œܣG_`Q,7dS,Ո:U֡Xs͠.jcOsWKӞvEӞDv@ =^yr2˙B66^ՉSp 0/{91&3) /&h3;z:']nwn>tÇ*ɴV'IFyW^[y!{Dk6ԐL%Sj*OktAX"rTԱr]"@#vTi}Qn\ИT~΅=m,o3mnT9K$\E6Ǚ Xn,k濴i̦k{>8DzŽ+c6OnkӞr dU:s6pP JG=ٜ#P.r@`oؼȳ=8dr-|+:rղPK JUH/?uf=VҜ,OϹ}|&9q^U^Lyr/&sc?Dendstream endobj 660 0 obj << /Alternate /DeviceRGB /Filter /FlateDecode /N 3 /Length 2596 >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 661 0 obj << /BBox [ 0 0 864 504 ] /Filter /FlateDecode /FormType 1 /PTEX.FileName (/tmp/Rtmpj1RpeH/Rbuild59c613e21630/kernlab/vignettes/kernlab-ranking.pdf) /PTEX.InfoDict 591 0 R /PTEX.PageNumber 1 /Resources << /ColorSpace << /sRGB 594 0 R >> /ExtGState << >> /Font << /F1 592 0 R /F2 593 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 2613 >> stream xZKϯmj1 -CA 鐿+gf׫ V9hZl6mNm6{[{[q_ˏOOa!l_wo7o$+{wz?7 $ˮiۋ/w|{+[Բ`a/q5ɳz.{ [,}W]zs=-ڼT=l"|%_iټB{S{:6m%7M8V{S^\jfAo{A٣rnqw L8V튛{OZwY\l`uܠaq5QQ'{K1t{BledGbP,3#Rg<0kWsSYpLvGtz ?wJa(= Hgh5% IzPY!?*;Xp&0{_P#)sAiIW8sHMX@zɳ&\z _D5nÎC900AkcpTDN+wf+L/ēYPP$FdO"Lqs=1smmo?=~{px t6Oe|>#Vyx~e|>m貿8G87s`=rC(f{dp/= ŽaWkֶ&h}>h3|=[G[8.|.x5Nx.j|l<~鯎M+ bzvU&$$?]J_ 
OOivTZV0cF"r|kC.@D3g[0gZ"pTۋ}|}v{x- "sZT2!->w F(rCyY݈Z*4^q2UL5 냨tp(X/ؠ%-kjc *re$b%Z+dXZ=\D)fZmX@vΔ(!ʥwyt YZR&;JDEaDAۋ%My)eXDED)E$Cirl`I*nC޷(큶Ds @)l`TXM`2 +-sP"aaBbҜN0Z4l @){gfeHBWĠIX,& yP Ep ~G#5z(ZO;yP7:TzM > LbB=b'>Xtd霏 MA^YE5Gf% bnut!ZŊ b"?,&nۗ cuZ7:p #KӉ`Bm0Y[ʑaw a:E-A2VDZ<2Imt2̡ v`Ձh( 6X;C TG# LBվ^!w#T X3F/;A(?Q?6؅%h`[5 t0C \u8㚿N;Ůb11O(^_Vom`DģjfiDtɗ{7Bk2}(L+@Ԗך6WB!n탳°0| Au| Ah1ܭlpPk79p(#X2,{@)x &2KgQ^X(Ap ({?(TZ}xkx<+xZJ|> stream xڵko~>@,}1w)ТI{Wwh_>Zur߼J+;mQ+qpf8/9MIe˷U8 S$7v`Xr6O.W =oG>ba gq>n|` ~öɚ x@~k/ |]| X8(]f?-R9~V2ڵȌMV4o"7vBF6)|ÃLei+(Zh\0zBz%sL9ؙ3lݲbЍPea"jV<[PvYqC4FtKqs@ԏ{F>Y9nSF I{4 b2g<4f3ys'Pck&@GXW#xHeOJ.d>X|\zjX"#z_b-Y629燥@U<~ufTj~وubچH57_}`ZxŁ9^"f6`>MT  lL [Jo3g0g̴sB3 i4'h`Qs2Re" Ѯod[aԭ)>F΁I؀[ڭp.uFUo)c'[*5݇m!"6;J'HWZx?%4tڲGg81'i(xcCL^X.UU 7Q(3Roy~J  ?@ LbTe \e^PD0,0Aͪ,#vsJ-Wȏip iJZh#2 :|Ļ8Q26@ba.l1Y )9!3fAm}LGb(pC@+c<$W(GrX G&w#ӊzr6xnyiI 俄"m XFE5¼QHP-`@\(SL=HISDww5{#`7a=x%ʇ0݂Ԅ8ISjC2v*ol||.G+cQtYµO$|9NB{w !P|{'uO%|twaJ59qt3Rm$X|]|`fK/[ȎvͶ?(J_Bq8*\ɝHw>Ʃ"2N[7FcvνL۫aބM]&1I qb<ʂy75ꟊ `KYR|G'ȭ'i2\N3e |f!ypaV}},\u bɇ:w]ijҍ 2ٔnEmky|P};!N&MqBx[ Th}$D ~Y=sѓtAqSK p} ,𽬓}3MHo| w^eS+ o}.U|Ǧ4Ġ#JW$.6*cxwSC t@R^djhE9Ha(R5jxU0Dݥ U' 8?'|x |(mwPC;qds gN$̳H`߀"(e8C8u@L|:_Zr 2Ehu0J )Cy;}Y[* #H]yTcGM;q5ΐ|QO)я!C.G'ppvqzÅ֟4;6jӆߥ6ҷ>*CbF_BM \$x*J.UB-ڇqPCTѐCVpe;&+X`,&6g6y?c=*S\()jr WVXrMJ Z5 ʑKCz=uUfh͞=?qɏ||bk;L1ͷd)VB(1䮔{w}\;:W* :wis>Qܥ͒ٸ5EfN~>sVI}_\T$RJ N;RT~gg_+y,k .ebUtHBЈ j'ܐn|:QWNH+ڨUGYƹ:_0A^EM݆0l)v2;z7Saw?(endstream endobj 663 0 obj << /Alternate /DeviceRGB /Filter /FlateDecode /N 3 /Length 2596 >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] 
=>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 664 0 obj << /Filter /FlateDecode /Length 3671 >> stream xYsݿBӼP+dwNn6>8~%YV+:w$@ALӌ";݀ޤrˋߖ* Y.o{U3ڤҽQ*yo !S!+Xe"Y/c *soi IRw )* +JKYQD,a_@HI=3[\t)U$IJaϠ|Ml4" u=ngVi;hG6 @XX_^|!`'zTWT7,UUHCmއ-\:% ˷Jg3]iUU|&!8_ykYoȘG;b>~Jؚ:%`jog:U [YًئWOjYf%wk3Fc:0 ֥yK rhޠP"؂1yTcƙvGb`J} H^PBio! %TV<(J,T1- / 5s 3;.lI!X}*z)9H럸y56`"OntZfE(컙}hƏO2OȍS_@P`)*‹0к@7X ҥF/WeOh'/6wmڈ՚,׭i۷nC<#JlX\lsEkyd][C+~ࡇ[#8T&Je3VFLDehNP̌ e~,ϓKo DAmȓ#cG֩6pvYm L^tMByzK~Ҭ_0-cQqhS-nE! e1%7/&dDUa ҁ2{Nذ1vIE1b Xd$c+q>)?E@q#a^\ R@ܕa *aGСߵ7dlw-c&x> WYJ='N"%86)טr̝.cɢUw@PRdkmiU)Mm`+ vCl^P09gh7ɻ n2-hJ+"39 WY)(w߷N6! /Hj^ď wM0•m]UNtuǣu~bŇob:,{ZE"&&C:M"⩊eFD=%6;bfS5͇EwL%L5q9A/^hpDQ>F<NzN8S8Pt= \ E\4QA0Oz1y$үǤSYʾӳ_|&^`##zӞI> Qmo9,AQr21Buꙍ)Q%Q0yʤˡKҗǎ0RP(oy)LĒHPG i BXun8.%E<~ yϫi_zeفBnV:TR{Em11_VTg9˴wDŽF}_ǭS!Γ`)_-RȄΚHANk2BXăA6XZ*\y jfjY$הs{8ԖmsTCvW0x52vx]|yO鈘`DRe㎎x{H%@j "NaAi_\b@LFV8Z -3W- fwg̥[9oV̟qpq[I-S]5d[G^Зn {YU]xnCuf)UA}F>,\p:``m6 W#0 n`oBD Uن[XmnN1kvĮXzTH:419$k3b ٨n \DbGT! 
p[د:|_Z֓{Ke"> /ExtGState << >> /Font << /F1 606 0 R /F2 607 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 2983 >> stream x\Ɏ# XʬJ2@h [J09#{JygY/_-E.նYl|]>/u,=,}˗,Z~^/߿5<{]wyaI˿/_^\^`g |2mk°5r#o\zsFZlsyZZ묋]On[:0a֙Ms 'eﰴνw.B 4+ ֦`"ub5&R CޡXtaI3;'y-EquJ,"n46vu]>&J :9 mVpmZл&KV>ާxe ' z^`|* UBFtN7%t2S5Cf ؚV\^-,d,:N9"iޛӱfXjnIZE;FPR33/BLRl}qLbRKɇTKDq]&RI{Ce\I-Q_Sqii=gM^xJ4~"Ò !dER9Sf<J\]k57ƿRB '$fZXNf@Ԥ8-rDLl}bȒmCW9.f #OJ~1̎ Mjj05ى#P3 :ЂrG--(Y Ŀ3 r` %g| @)7$Q l-]ı1WEFT+%ˈ$%X5G=pXոZ SR$+ܬԁ\{攮KeOwlnoo>뻧#r.O\(gAޢb&!bt)ǹ>cv=S"7H,tOޝs$,YUja"yӢb9m.䴁l:-c+z\\M^zcy^s9;P<)Fd{ G`)ISGF/Hg:C2cI!8{`/KݚS O㬣œc9#B<{q;",-@YkWYKG9&ru ĽfEaqf+,/ Y%1d="]gDG3/lx2_|HB+{iqhY,Y`HR#, [`I!2#ly!EaBb%C]LE6uI'Ek#2P@{FS9s w?o>"IzT6*aG۞mW=zMkD4ʝ=^= R'iY湡>XMʣxC#r:Dz_=Ԯɪ 5n>ah+J :"RSD XV4_CeW?O;zdcQ>Ɗl/)-jq{NJa]\S5cC=#|o:$haEIO֚hpO>#R@|w(\:/ VhfVeu&m1_S g]=JҼppWD[-#kGg\Sƹqw8'ލ_-~}O~xlۛ]R{n;E=w&qyZɽݲtMq?bNԏ6oǾ=#?qG#8ߍt4pswG{G[IaKi}O3/OrIz ?I/xO`UR>R 8݇߅:%n]K0oy/?,7ow8n__gendstream endobj 666 0 obj << /Filter /FlateDecode /Length 2229 >> stream xX[~_a.bx%MMCP>h5wGc{2)ffӧb0Eb[onyvF5J7aXjl[FOӎswZkoq'32vz/'~' '[0 Y{@DBkҕI6]"M!7ZWɪ3vӼOmi( x(Q>=b Q!W?@X&o _ wr𴯨M!/.T\H_"@PE얦\Y4Ԏ^SE,qeQ,pDmeB ?I]/Oۅgjse)1EUSe)aV׮ }O/?R _~tѲu.0˥|tV E/-g=rv%>N`Ts>EO81!៰Yx앲ch ylrY{/ &i8Qez2Pdg, XF,K>(ƻ&>pL[(6zlXivz6e-DnMac~AvXLKfXal)E{_BTpL[T i+2ߺ)aG[k-[o~#(p\wT^n*`k ^a>;lj$b"5C9cˏ;cxg9 gf254g\Uv+@%Yα %$Jb@$ZDSuE3@/}O=hNkքzdg"'Dr}Ǩ7~<3m\xPPT!!=cd\ǔxFʃρC^J#ߓ=ba5@ bfNRLt5H!߷t1#~媛~ۧƄ۵]/ 4i87"M`ҭ?,Ցdq #0CcŨi;sp:K^4ʚOtdf٭>΂ ~_hUğ.v`pEWpӘ)E1)Xp=aK{v?Ji)&׮T9JIn.Φs8$6{@LØ:({-Tq`PSY~s ?74Kk b]y)wxo1qY =;>1!'1J2e w!H!T~&|_CC)=SR T"'VD@%+RV`B=, v:ot w2f,"8w)Ppm41%F`aR;g) ׳> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j 
MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 668 0 obj << /Filter /FlateDecode /Length 4754 >> stream xi˯<#ƣ#pƚg1NF|twGO19 >:n3Ć9tLADjs+n98-4!l? 35T DKŘO;n[w|k" x< RDCF\Ğ _?yT`K<8X)Q 'G$#FҰEJP<-eJL@,6*;;R}5I-=%jef2nNp7caƐjX=L%pK^[3zn+:Xhpcec1<90 {m$VQŲ"\j'n5)ofXb xeMH&48$]<;+Evr"o>y$$03\ެo- vI?@.C-X ěCp 2-Ic wMXg9h[b *n)6x;If5Rϟ{[Cr 4d{nDg]H9΄"}fl2Y-k>frV+dCPh>oA*ЩMU- +BŃda3 Rg*J|՟vmw˘1K_H`b|6`6-}[!>k{'Kgs˪/ZLU48[ĥH|E2 #?86g ѶV1,=y(6֖̆i4 PC & )|cJkC%9kJ]JRhp 7& Y)&ZM `$+4݊,ڼ/yZ4loɮ}s;\o&/aU v3Į<*չ,mPuRF,ĴhSZ]S}!x0!)]RVRx伈d*2ޅRhc;n\n؟XőqA^1`Mg9*_: AӀ\A"/VItܣFxwT5`)KR֩yn0640E!rkJ:ed!PJT""gW .i%AaH7cwΚs9U]ל L!]eB gЄ3Hs.K[;e 25cݵ!! =$ N VlƀZHpf& V$g"91+ 5(PtIP(yl!D7QW@\V MDp & M(Ixv( .d3'&;S4 k3D1뭮T p,ؽXx ϷzVCkWTQ;FQ?|mx.^#bB8|pH2)g ~M>\K~pBYu {{Oʄ6/^1͵jo7݇ǒlS$Pc&<| RQ9-۟qP$RH;-k{L&#vn).Sw! /ݚ6:uE.v0Bml*:&Kq9_ YP@ze$,z 6`%/D~tcSCUdR<튆8:+PȄFeQ|$x8&/3ǴW z>{99qFB&U1S `?`6D wvxh!U>yz'o ֈ|)oKWQ I l֬Kư" B;:.5[,WKE,@^Hw$vOuTpL|#Rvb| N{`k,*%Bx|[xچ%WAK8usLFS~ 3iEr]wuܧQgrdd֌XȚxp[6B%BTGn:.HȪŸe\3wZb^(Ro ͎ؗ5 w|[ tӦT\Lu]nx]ww&9 'Q-VWփ ңHd =~FP]?^'V 7/9upsQGĹ2>%oIs?x9_t=rm=ɟ ‚W*DTlHߍ;/F>SG7Y}m vRG:zwB.+Ӛ$ X8hJd}[ǮGO,!~h ?Uғm_MYʿPx|dIRO|*6hZiz Obէe!a nTbOBBRYhW9Ni]edF'Y6Z /Jju],Z\nGoL{TS_؏2J:R/NV۔;KtFk0U8#PXX Tإꤟms6ǥwCQRBx1e~, `=ilHL;\Kt񣶘FM+Yp䗺H듦4\t9x8(6ܖWDQgH֣}&jb{Նuu(aM{H=~SOIڈ^ts6„wO0WݴƲ|0K,wB<ƴe8-"c|%@p0msK Ӗفi-/ᣖ3;CBT/_Ri>,ÇڷP趇cgoz^7HpsL{YTg>7~g0et*_y#&k4WW)/؇J& =ɇȭ&H?:Xg+)%OAI\ ~>(*"T3Eh1KvR;vu_^-L;B̔s-wl!WE &]V|;fD¿/S#endstream endobj 669 0 obj << /BBox [ 0 0 504 504 ] /Filter /FlateDecode /FormType 1 /PTEX.FileName (/tmp/Rtmpj1RpeH/Rbuild59c613e21630/kernlab/vignettes/kernlab-kpca.pdf) /PTEX.InfoDict 616 0 R /PTEX.PageNumber 1 /Resources << /ColorSpace << /sRGB 619 0 R >> /ExtGState << >> /Font << /F1 617 0 R /F2 618 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 3800 >> stream 
x]ɎWԑ<_%0|t0(@R)x=lVϛlXͰUSUxllzju}iqOiK[g>s{7a!loksw[ؾl?}V/Gq/žno_n?7=:]7Gj|ve+t.pE{pxm{ *׶7J7:7-lAK&&dr)syi{mĺgYy)Ub3ݻS{Ur&iQjlsRآ{N(`-HO%al)i'3&1̖zЮ&7bif=28Swa\ܖGN3UV)&|Nq8AR+6Xa^gɐZ5_ alTȞ-JݣG V*Gt=96 ^O~OBdjmB1]퀇q3*K 鏔.f/xPm=9k[x5{0U0<5sDX T >I:GV 3Wl`mՎ?œK.֨ӛicKuL >{=9$H,,b.$RB1WS7vѩ8'sj08=eT,r-~e PVe( :8ޙ#∹ Dl6(Ƽ+9|P!~h!8),{_Y#8 J 2sb{b1(#S[BݨV88&!͚] E $ۜQ✃f AR؛:̎6*V wjVKt!١!:jV\%8:8m_3sJ4MT tG7z q؛ӳUȐ:}8\o2frSf/Z·w/Ps!#W#7E޴o\p3*bV cfaJq@IB fq\.XϘ]& uzvgcHy:D#&d^q :cԢqeDB ` J)1grɄޞ038͚]LJӧehu+guuN{ͧaaRJRQ a$,H}/d],XewӔVQPF5 58*S92R9x$o|~%Wp\3QdX. R3&@Ftۘ,[e:S]$`GRs*(#,zq9VTs0ڠ(fx6uA`&N[O[) -2܅"ZȽReLbzHC,gܐ-^6gڵ@SKMGYںbk2 ]wh4Pt;hn_X,ɼC(AK<ŕ 7( 2*$5\B~1eHPeԃA~oZ+2HŷL  qdMVU, ӠاL]%AgҺawRɺ)BGҫ}byQ]jP>Mͩ:g?9 фY NC]\^h0 G-iqB=qD/<tRVxuW&^Zj8q(\[y"=1ڮF( PUTFI*VS `njWNW@k ;GEΆ#Y|U_(SąPzLL46-&z3Ј)A3d[2-#O4.ʖ~&Eå.h LbJ;ZI){A1rvT` e2$+x3n+on;My\*śx@1 (^HѭS|`ǭcbQ2`Pn*F܉RL S;y&n$`ŭ cUcS<7]V.7&"P`[ F]WW9쩢Gܡ|^ dºUJ^:SDsa]7vFdGY4qţ(9ah}u^ V+ " TV+kck:msn"0Ԭp̟x}AYC_n41qFVDi\T3.H03- OB@b\7=P(#PX[-ep~u#)X# IV0왹eht87ZmgE>FswrlK2\QuNv1MT9>똎KPR6U 2&|8!F+3,9se^v|F0N0^z[oQ\Gp\gG(D723IC."#[J*0M_)/0e+>{0f{22q1xZ&[VPFO9Q%#h̅"G]9}V."8-<6HZ;Ưdon2x\;# ꃸ[B8&2nϼxud|T̶ܴLX,@LK"ɮql_KHn%ix_Aw!gO^5%tu0:k zW7E$%5In]@pe4䭳\M]@Lb(DnRHJ}Jj 4>D\a6Q d`ޅɧA (j x_?*;{m<_sY;Z2z=[r"(bd ; %#^gW(4<-YI\c*uC 8ԃZ=ntF)'w]\Y[0xe,waxt.>{bd Oi`MqgO^'[vH(W$fF* r nQK´dϙȄO3gjy,^4^Abz||mELJ۹?}|ty8zN?}|8۽X\_7'{[{=܇sNpջ_HKgOgFQGg?)N~p'|7âŢw{էGU hz=z/=h7x6~nC?{J gφ>Y5-~[NtI>3{{O>k7FV9 :S /o~_o~xo.\g/ Ky\[ɵ_${xendstream endobj 670 0 obj << /Filter /FlateDecode /Length 662 >> stream xڵUK0 WxlRۉfX.?ü<`KwU7US\yB5՚xuhsg_C7bW[;BuڻK4{FWP{Mԯ3%NKd(fLdU1)yy&m=HBdZx[{H1x*\nGg;ǏwwVBi(^ [^&HP Q59m /{~eYk`)59:e3h g]"w]ٵd>v(9cqmo=ɶ.unw } ƥLendstream endobj 671 0 obj << /Alternate /DeviceRGB /Filter /FlateDecode /N 3 /Length 2596 >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 
柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 672 0 obj << /Filter /FlateDecode /Length1 1671 /Length2 10873 /Length3 0 /Length 11960 >> stream xڍP- w`Kpn 4ָk@pw 3sUUWuj*2u&1s)PlPacr0#RQi\lN =4$&.˫ j `qYYvHJy=Jry#֌9@23(X^#f rqqgaqwwg6sf;Y 1A.V53 h]2@WïT +uJ؂̀ί&@'kt"Oe?]owYv l`b XlwҊ..{ߊ&W{7T&UřdFn^Y\lgwqF$ hz,5nd7]=('+g tpp?l|2  "d0ZJ-įwyX_Ǐ ub ei-]UJ[(.x3qqعXllܜ׃Q1ǿl-?} t ~\ Agb5{b?LoFҮiTM@iN(_wU8er.& foi5A@s՟'{lA@3`bcevټ>ί3<R l{_;ldWf{]GsS `a^Xw+?|Ы56˟7`ī3 ?ȿ N> _g7b3_1^_.X_w ^v| dUwWjt5)cM^;ǻzfֵmwbLB3TکtLދN\P2?;݈%ulK^.>yÅ~MTm}e6ۊ0?Qx,VG@Ĥ!hC*ѕU%νWƣlyܮ^үih(TyY.Lh߯of0s&^H}O9u7cgV+4؝; ( t\cNQ{|Ǜ..4;ܛ0E@\$؁Eٙnde6 !*H*1"?Q'@3|"]{뜐MM\1FP+ANl.=I d VkѽMĐK3Wyn>(C [) 9zO0yyvAɧ!w0[.$iiqu>BvBcƄQ:q_/l55_蹗C5C7.&*i_Fv6JNmw;@ 0.nQ!dI/f$e-[/ 48d8CS C _x#dvk#N>/SpI:/}2X;>*wG+6HH1"q 'ݖP{k?NSUwP??VPӔ!75P ?@Zm%g5hgveH=%[=@M(w'(̒67E8h?SO&s>gOY蠂aMJ 0ŗ\fJJIEwBu11<yߚї ;zetS:PeFՉGM(6:87Ὕry<C)8 Z>M8uDu-O=F 'Gh}'bdf.Uh'KGQɒ1Qu I':F os=Xo(ޔ.n,!K#0; ܘ\eu/C`cyTGv~8U0`L;jqL+fߊ"C2/hf[6H.!dF1:ӨK_'>ԊN7l A;-^Ҝ}^m5썁kmpB8S#34WUF/Cb?1RE*Ru#w hxK +>P◞G}#E+3N{'h>HDxi(;}R`#"Q[lU@EY鲝=([Sn#NzdiJ5j v<+$IEh<߲f48>+,=ٛᖠ:d*2*, E_CRJȻ=7@PEu `(`[DxYǘ q^෥֟`HdMİ8X`ʅ9,Tf=KOS hUhW6}(9EgEAwa}[N|b=n eðU; %˼P؟=#=$s?th$. 
s"~Ɠ$D[[: H͸x鄘+ZG ٘e[7q%ZlV4$Qg桊7 MQ7ۊ?\l }wdPȥӎNuЫC}V<'\vU1m:8f s!q1g,I|Ę}p!Ty7i62^TcR8L(T,BTo4S gǏ3e1a5!bL3\كrQ*qF&Idlt?$a,{1_J~4/VKÓj.7FPeFRSN\\gTF4ȈFI✱ܥ^.*OkG#}RE@9D$,FOjKhC>]ŀqF H[j+Ob҂q!RU3g$BTGI>syhҾIƩ2wv5Ns~ztUpyO2,AThċ_\P S!}ݕq0X &* XMKStcD# P><{y~{y",_+aMlujځ~/BAdm{%nbܞ]/Kߓ"䂻Kwd<5x.϶;õ$\ ͩX_> cu65Q4f|L,TC$Ӄۺ&zE+QaTy1vLj)h+^PlzN_ѡ3sѹ] ҺitT0Tb‡QJEI/i3W,*u ;O+Mt3o*,ɧ)x[K8K40*dfO%q!?Q@vjiÑ#1R] ob›\|1,|&I&~ykb|6-ZO (t*Il-ZCdsY |ٗ^gAuqڥ_`v\W*Gcu.{VdH>sJ/8(a;Em[.'y: R_t"css|mYUj7eg1S:--($hFj^𺣮tހkxm} X:^^D8UQH9 !oMd Cf]G:qY-(a!oTx)FHb47ˉ546!iHI)/Sw_3NՒT5 1c΂KuORꚆP㑱`+ r.QHGD7sZbI,1[4䲆DT1 y0$Y%8H/8.gX!4(VKֲ_-)cFΛo-}X1@|4>џ)!0.:!R8e)U˷(~ K2 /sD51;n__DO|t{ӧ ] F9N]Њ鹉L,j)%7%b[z)dbe9|Lf4 / <4vHaR=tH)Ʈ+q-]*Py"2' JM .xW6; G'Eqٕ盅ڍD_EVxG jlsgUKB\FYr b4A[4,UWۡyV%z\y}8 a[oO I-~QuS X[>X*cB!@VZgEww˔檙Yæd{4eOzUH0 h3 Ohf])|T%5MPs}WwVݧ2/V l"8;G uqRǎr|ǝ?kr&gc>Nx%l5+L?)q+w[`\;b~*Lt_fYuI!bL1df]P3yQGv \/Wk؛q(4.5!؎)sŚ#| $(KƩCP5źN_ FvX8)oyHŔ1}[%m)l9=j%O`z V{wIb:} zihR}byUg%#Fc3oxFH ؤk6q1E!iu(l% OtW|6S-doǘH8|j]9}icLH#L)%ZmM1SrSg?kW"їꟑ8UDrFsӷXhYA^L"S٪"1e2À˞ ʀ_^sW.I=G>}s_9YwSWBow aU}p.q-RJwx&lRSp <0T=t讱fC"ӈm 9.È(ߖ@[#mh nP+Ul ffXkF-Д ,JGS}-䵗|YdGʽ&hpw> 57Tɓz.ٷh49!=56siԎ},ĸ/y;8Exu o*}`0CDq*Hڠ[$q/iW̐tڼjaqlw]J| )@,忆p—^X|\.*aњ w յ1=q ד%y\C/dp/5<̹NZHOt\#ihP=ۢ$O%0!:_Q濍D,IV`-&_3,_9`620 Σ'ø9 һ݈Fç̼YoGӏi5#}7[mc}G!\bZ"-w:D  o)[F)RM|ʺ#gWApI̪k̕fV,Kh"D6K.JNN+os)մoT5iL I0)y%rڡüpwr aoWOqlɝvNzS}ߝ# ܭZ-874P+L, a?I+̔dFp/%d{k'H4`/dNtctYU|(.B꜒-8Ĉ~- boP^e~H),lH!t/$PcG x"6p'S/)fV`=%qgۆ/-04:5,2h>G6yV_洹@릁ywp55FEK:n_&zD oE\2z]+cp&K!ùW`މftInSGjI  FOPu?z4m45 $](b4f}v)㬷#Ht♘ք '(脦Ban>%"1ZfB)s)b]Vx5wJiqR UnSG=6~Y|!!bk |wՃ N-4p&y[+ 'ou =FYɉߓ$n+R!v ;dYa&j>7 ,,)un2O Exs'Ĵ  n= f!C@R: r}Z~]r- k*F - }r|,L4AWFY"2x{#86)kG6v}KUmCǽB%ϧY"BF\0T[ۧS ?}Sr?f&cr MjIqny N601pNas‰i*u>y7-@Eg! 
3ndYr,MT4lDo-Xj/+ˡ[IATfJsQ+BO|89mc.r3k2Qnwg_f }E]}9;i]IH=輵ƻ 5+" b :Kk7)NwX.@+RAQ"~ЅlG,YUCgy5"ej IN!ml./R<c97#M_YRbboT׿%sλ Io3W{P9qɜi%%53/ ^ #@U,貫B,4%_:ϪЕv.CMF7NtgAMdM36m9WhgһxV(7%OTSCvd$b0t{u3aCL>3?}]^tj[r/15^-hrGS2ºezMIq0`)zcn kN^Cu/ C\uo O䈣-$WP ~f}e+ȗ=YSNP 1REѕd\ɤz܏b7NYqewv$8b;Kr,n#v k3(zhk҈l!2cs{ãT!Sm=MTҷXsUĞIPi$4O())r,7CΝ64U7P[77/n?)ּo >hK+bC:Av7xMVV:;W:#Е#^9dʜfmhַ~wlXVkϣYoZ}fA7+]k _c?W螃z'+Iwx2JK\OG1;L&ͮнcG _&> I%|; >$:΅N 2]ժTݩaj\GU'lsW7uR-,Պ*N6:,oN#NYֱL>W2RuY*w!~ǚjeiH;o,W+ɓs( F# ?}\9?hLmxQ$UΠsX$|b2)9]gYKĐ J*%GWSۡ:`Sim$:.k&#D${{C{*qym *Cg(c?p?Kendstream endobj 673 0 obj << /Filter /FlateDecode /Length1 1461 /Length2 7186 /Length3 0 /Length 8182 >> stream xڍvP\[- .ad`4HKp \Np~{꽚3W{]ZCM j:8فBiUUE^ra@`ܘtz`W7I] 'qP''/@B P:0餡^[5Y-`W% قo90_%Ela0g!OOOv;F n`W5#dt[6 r% dv_VT;UX re*q :: N6k.{ c~AnPx>qY;$5 f qA~ |e`'d `K{q9Y{'_5VNw_   s `-ǯ:^A_n~>Pg5| qy0WwXA,a p7 ?|WK1=N)^VP'ϗCPSKKǤ/>6.^ <~U4@d pt ߦtԠpւ ?8;_U_$~0NZw\P #ZU񿣊0\N6p2qy!nr`+ euIրA~-, bp}Y78/@np~/ ӿuZ/ „=pj~v'( b:f~>/o .-N /p8p³a\\ztwuZc(`Kp]upU$'ƨQh:GvZrݜrfvjR.W˳?|Pz\QmPY<\ N%\5 'KQ  hPDѦeK' ǯҢC 7-XE C7ؠh4ek վEnJCa2Ď\bHD }=p 诉R4uй@_~~ERz[ &ImBTBFYpB&S2Dejfp-6lyãǣɡ+6G9j],7T'\h}(dßieHm1bZ,[Ⱦ#ZӋ"Qk"# 'K0;?stk*zM(&kNŅ?]>]j7NћW:띬՝$׹ZS @]hq<8Jް ]1& /ܙ|_!R@5P`Z`Y7\R:I jly/4ŗ){#Si%"܉hL)V'hdiwRi_[MV^qN.4ڼd2܈oJ1~G[QZ(Xs+ \|~"=3V~_Ui(>$[±|b `L9 6!_ɎX^sݨ76꟮f[~ԣEİi'tG{5V-fսdhl 2#FvpϏEҐ]gV~FfDGws~[UZO_bUwL'hcO ^'ݣj>9BgC <~F/][7cy)[G`JY3ORl1U a yd2ڐe6#(BDңO/1D1]1x)HbMguݶ&X#)I:Sz1Yg-| ?2DvO#rENSQob`3WFZ$ i%{lޝYoV>SE. e,+ڔ@Sغ>`XdX|Él_ZqR,ȇi|ЗwQ7.Jv17TtYf8)t2Ѫy_g´kY7m 95z]\4աyo( ey0KRvs"X$psruwp=فΧt친INuOfD]؈OyC§ϱ񽋢>c]FJ +`zQ! yahmbQ ֛.|b"v`1zEsgM4n@Gԧ&Iͩ~j}s!+su[/8n!WJ;]NJ4D2(E/R,X(Ql"Jgq2jX_ȓ'˧_wE<0+ y'ƺJ;0"7%#Ћ3O_:Ѡj01WU6'@eTxc}JR_2w']%K'u5\U4)Nb$-6WfҤ31wt{.]j |qPBezP;ghE;jv˓P1\a 󲡖tl. 
F<NիE]3/+C48PtG\] mi i3E$C)~ Q 5V|%aaZk[֌N #s/rC~y-;EfLvڦvBx'9[{.koG =ڨڃQ^㊎ԑZSk1^%Th%ľ}F]uњ(t"ąjSJ6Io[SV  s϶tUMܝBe([7xj~oNFF g<>?+5=L \f^6fěcJ7W:DJpk c+gI;UZUmGq.+<}"/"|DK =mDHJL_6jw*hl'ދrz%v#eLR;{{umGĵ- {u[uzEE>gJ=uUmE FGkz㚉6qȩ~WښBw6Wܓw]^YT7"oaI+>jHJ)_ i)D"ݡ1c~!th)TҖZ}ZY v^XT@%ե3n2Kt{V7l,X8H$ 2O6i(~T/{x͞}`oNF(mi;Sqi'.Uǀ~#HOjquاJz"r{V]3 FKl$|87 V"eϥ%փzxqX,1KƘ*~b%^?}DxR9EeApvnPp%0#.nݐms!9 b`^|DE!9c8]ՕH1Z|vm-(nzCĀ|Xfùـ޳2/r# ZM!8߱ަڷX'B޹g^=)%+DLʈkcw|BF5HB^`ۍNt1gʑsQix*WήDT~Ð+?U׬tۺėN=-(/ܬ9 {q;&`U4T\) ŪXeI,ao4whk̹Agi}}j3PA;9F3RĨ5a FAF/ Dl ~6Frye3GX2P/.~JIFA~N/.)^lW*?Z410;$7AȔ\X=Se ó"4WGؘH] ?k,.3MF?ERz`J-}_S(N7M~W`_%Gya `~bendstream endobj 674 0 obj << /Filter /FlateDecode /Length1 1373 /Length2 6101 /Length3 0 /Length 7047 >> stream xڍvT.!RCҍtH# 3 tH4ҍt !%J+]sֽܻk֚ywyװ3)!l*8O( x/@!BvvC(솄" BuJ Mh;b@ @#M8IuUupr%$x``7- `-`QH% A\$<==A0$?A EA`$l=. 3?!;E6@أ2MPWUsm_ _멬XtU4[DU ;7NwQbE |)Z+/{0 ן@Oܽ0yݣ4FBA伛8磣QCQ%0u_ "zY<lu&gG:pk5Q?:FQQanTxu+Jb⤑DIFtewhay- kHRCN9?x;9ڏ(g ~%~ׂ+H{.evb?( :zyLWl]@:csUY ?]r o/pp 4O6Ȳ/V|g97"{mF^}}9!D S:X76ODI3FSY)g)UIL<ߙ$ZWSw8˼oTУ?=~7dp|zv6U_o\Kg쮭9"/!xxZ2%:R 4VME=Smi-Kdc`0C̑R5|JONdr}s/)߀4cFqLMB `roҡ[ T k5!wFNxVfy8ZUIpN5b[%|W54 C:λ O\%Fમ0b}'޹]c;+[?=)yjio[/n!]7n=b;I ,wiYޘvzDajrW19Òi=v>P>D{y;z;SY 9.X=zܢ2 _h) ˸H=a$>N3+a e#QX1w_4XZƹFjD?{tyRvnk#Am#+bcu'^gM(iTUHipT* 7^E@]rSrݵ7CYe*0nK;%d?]yS2G彚'4Y>ء2!QGbɼ .HDi쯡>e8K=)sXW2\-70bԾuWMҲY 1OEȊ̘P b i7,[in2Il3(=vaP@`Rܕ4VUz{Ma_V<[IBx]e#h:@f̞y6VI%ݡپ5\:qB>^ބSh<:Me*/hH&75uGd#v|T(lŋIQbiLQrLڟ<՗Գ:{Qx9yn }_=A'i~sHX=#yUľ / Ԧ7ꫝ~E%9,ܻA Ӊ޿`X#I/e#qF\_:y]X)Q$9I|jX/J}0+?3(9k0 "~'+e2-O~cSS4)ג,Md'V ?,*F->W٢~Qt;*0te W.p֟.\V *h<XDEF\PʏrsTZkq#n)޲fI ǻzм3 4e5߁i mm| .UAzƖ{2r>)D{S5Z8&h"G̉էBd3|lIϞO-Ѽ['R ?5AX&4MZ<5tpʺlD4ʂލoq2V?̐.joXZ5mدN(8eu~)C/p BtvsPpEKbf>fb0DU7g ?e1BDywa˟l_ kĦUM+Ip_D!%\PqVOqT{to]S{sQ^,0x=Vezsw= E CMr :a5d8Ě;luΜpRoN]qKjrגt|R%Cul8cڹ~m8i"dQݧRG2xM٤nfx~_ltw{G}t=9\S8m.V597n?59w rvfN̠,w+]][̫*(G cwiM =2۾L\ʢk]:ɋ  InZx~iG rʔd˵?edPjPNWyL1C65q?RY噵"K!"jLd ,6TیPȲ4:Vd?50>dN CXzZD!{횣a䷧|jپf]q1]јE!ZKxLef(Dc's X-|#e f%-4273fka>i|Κ{¼%k(J8Z[#$:g} 
AK}UKNSKS^UTUc'q.fH~Řcؚ-rS ^RmI5ޭ 0F)~mLW!=8Uom>r+ZI2'i<̅ܙf&iVZHd^.l┼~6Vk})s.$pz/%y[#KIQ6JTo bb|endstream endobj 675 0 obj << /Filter /FlateDecode /Length1 1370 /Length2 5904 /Length3 0 /Length 6847 >> stream xڍvTݶ-H ɇc&*$@( {ST"(J)tT@(7s{c72F=\k_ehrCq@j`и gb!@(Tk¹! H _U,cj0An( ,!#,)"P A!=A#@,__~ ,-- (#(8 pHwp`8s7Eqk8VUivs?0w_^80xjY=$Q- e^7 P?8KDpp?47iBzೠw?>B}p D% Aj~D ~ 18| 1p`AU*9P?*ýX~ ֿ݌D!SlK]TQ2!҅dqt~.Y昧= wwF Yw?#PYVٹN'iD2#vkյ4h.ѵ'?ڼ,wrnօx .%܀5m"=*Tȏ CceĬޞ ѽj; ,\oRXRr_z'_e)r7Od/9:^ۜ']勞Ni29+ q2hfLIM8J/4^Ҏ{mQ^ ,؟VBX,RF](k< K9zN2*m<;ﯵzN=C'{\_}+\7}"|1ar%o{KC&~U|p&62PDj;6t+t*-aʜIi=J*G[V?aE~m7s=E lc~&;XjU6}*2Ma䵼hpurɴeQYJdc^y͒ 45<Ė 9wu&"ĖF VEGДtY#>W Z\Ǝ RD8Iaa1퟽70{ }#7f\t[sw;FHuZj0ːhI3D#͕$!}F&o:=S(#.՗Oԑ7CG8C!)9K2 n{A!U>wbw 4nXi^-o$ehch6~){JJ{xp1.ESUn8խM4s;;dR7YEx)]xEݡa/* 3XWi@N=J5#!6ù=;pXklrRky?XI\GBMgsYַ~* 0rԷCTFr wv6^{`0̵OjYuFK_JT 9+UO-cq= \&A~ƨ^樓۷ږ^h T~IwoiY^jl+5C=ř/&L^1A!JYDnL\ZKAϥXsa7eqߖSC|;w}*UU?^m,.5zi'xcA;®ZkyZ]d4BO[]2l"~vV ]k[KgQ U Y͐KK)M]krLUQu䀴mqGܭI6_QzSA&5.Vt.֏澨yRqd G8O]%(KSVRhhV_N}^0n{!ybYSqElijW*CkŸHآԧߊy/6. )*$.,AVYYOkE[5ɜIܛnqмKA_<ѣO<\nr3Fn tiPV\ҾN#ZM1Rp\ &:lUӅU'i {OOT㗍s.l0<] OU~jE3GR5aEx8Y3)r) rL(Ut=1pA%eb7a*TUga3Bw!u* s f;hhhˍߓDW]V9 ;6 ?>o]ațzEuqK}؞ T,~~ :QdCO9td.9xQ !Wy:/ǯWέ*ˈ<,KQi>\' uOLw;41Xzoc$M.il78~.4PCGƑuQ3 oۆkD< E rޞm?=M2a}_jWzEuX;%{qnKUUV߼5Jmtk9ʼep"Rt`cqmrpI=<[E؃ѱS")L[ ɞ[1wӉ#SýKvc_(8ph%bD:WS۠,o||PQU'ʃ}9 2i/] +[P0mp=J|~gJVRj#k^?k< gI;>1Еog۴cDؐ rϚij2/r0C:Hu25cwC=.VHbX^#gcH8&A7NƘѸ=`ڏy)W,o䋅$Imb9֙i&`bG˷+L^);CZvV%Zt˫ܛA- Σjjej?"vLHHѝ!?|vVj;5t*Iq= x=j)Dq3g)Erd\ e-5ԫ8ƽ>+pȱM&E@DO/ߧ2S-D f!KZ:i7[@}Þ԰ڥ\QR"vCzDrZ)dM6 ]u>t+茨~J%w~IʦNƧd kU5T?l>4kCэ<VE.h0<2kft_`׳_dd=ׁXp%{^Iy*M4i ,!k;)-ywǓx{oվ[E&J4ݬv'# 1,i9]4GNwDRCY@^{CR}'B/HÃ,"'Eȉa݆-SzR KC l#=ЇA=[ 2~:}NF4CJ`Q^5,a;.̺ԉ~yG{Aq3%q+?يKP{Mib-utd>X2 «.V!b9m}Aq(N<;#=YQ3d|L] o;Y3(#+-F_8={줣h_.8>ke9*7KquAfBDSɫS>91z0U\A}Zç=D\st᳓ń^Fs2 u\x8r,cJvu^_DKPE|;#n]mQrbvl=a ~P*Lv:;[p@n*F\ggމ~s~j7M/ NV&4F#fcF/׬CMw:G?f:RLxC7? 
ky4`W*z.E\Vм^Q4;5ݠq;g7hOa͒no_jBf%iv|iW)2 t,B/y%z#]$F߀E!Mc9hk.|Ȼ:p 4le!*$롋f'iy/.)1wv.gOJ1J>۶6k+Y3v8z9Z8n4#s/8,kC#8һ?P g--ޅs8\o:#f?EZV E/DN4_54J'dCB $Gi%a :5L8;hbJ?垓ꭊMUpH$Yh$Ft>m'oq9[%7ʲCx GazH'$'Ąi/'110D&jɃh ŭ Yl9l.ƘmZHq8/` dt(عNԕ4%]HңuIXZ/- b]~? D78K6bD?U&25^Y}`|O{3gHޗrςMGy- !Z.h|cBr ٣A.}|U愇/Yvw1.b>&XX6?VO`IbAXJݨ,U mjܬ_ bͯ%"36ps*&;/h(Ԭɿg۸N /SK e͇ {kqKд[I3 Us/eGWk0 L Z'+l峔31l݊Kek0$9kEelIJ̔QŜ7˯߈^tʻ$2~QrtW'fS :*s{wKdۢkBFgsi#A''O>vg)+P ,x$ÞD;U,3_(]:q:[wZԑ ۤtۅ_&ܧ/77ּS P߷dSS[' 3.R ~9D ΕfVc~DΘnQ[++ǯ;Z$[)$t-Rc8@DsAMXd2׃ _Ŕۓ}'kB7NmPވ%ozYړT:?VHbU[D ٷ8"S8=:DC6f3J#7b\fI#z;_%tڞk- }0:->uiF3ȼ)Āꋏ޹UU}0&cO;F?V5[+-LuX9nv"\1dVp'S/EWstxVN嗢7:/ԓz^i. X6&B+]*21ULW]m NcVWv=iEϏV5fS\HtƼ`W-W(M]Zb*fNE}H-w9GPQO)#S鱯l70TdwNA¼Y"}oq2/`4<':mmACm~ߊxD&34 Evv|kr6endstream endobj 676 0 obj << /Type /XRef /Length 340 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 88 0 R /Root 87 0 R /Size 677 /ID [<027101522edcec73a5433101d85ceb7c><101ee438962de34755f8d8799ccab4f7>] >> stream x;KPO"XlC-N ^(AAĩ8" C]u7qSp 98y2IAA8|=>N y8xlK)Ol,úfMx?3Vbh7C|`|0ćbho B8i-Rw؂/0sMOz3s W؎gƇcdO.t߷nUc!n̟8}|HЇg$)3HJe$YuJ]JNbWb{ !osGU7 b\v`S]w`bH=?bԫnAf~+W L endstream endobj startxref 506896 %%EOF kernlab/inst/doc/kernlab.Rnw0000644000175100001440000014230512774400037015547 0ustar hornikusers\documentclass{A} \usepackage{amsfonts,thumbpdf,alltt} \newenvironment{smallverbatim}{\small\verbatim}{\endverbatim} \newenvironment{smallexample}{\begin{alltt}\small}{\end{alltt}} \SweaveOpts{engine=R,eps=FALSE} %\VignetteIndexEntry{kernlab - An S4 Package for Kernel Methods in R} %\VignetteDepends{kernlab} %\VignetteKeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R} %\VignettePackage{kernlab} <>= library(kernlab) options(width = 70) @ \title{\pkg{kernlab} -- An \proglang{S4} Package for Kernel Methods in \proglang{R}} \Plaintitle{kernlab - An S4 Package for Kernel Methods in R} \author{Alexandros Karatzoglou\\Technische Universit\"at Wien \And Alex 
Smola\\Australian National University, NICTA \And Kurt Hornik\\Wirtschaftsuniversit\"at Wien } \Plainauthor{Alexandros Karatzoglou, Alex Smola, Kurt Hornik} \Abstract{ \pkg{kernlab} is an extensible package for kernel-based machine learning methods in \proglang{R}. It takes advantage of \proglang{R}'s new \proglang{S4} object model and provides a framework for creating and using kernel-based algorithms. The package contains dot product primitives (kernels), implementations of support vector machines and the relevance vector machine, Gaussian processes, a ranking algorithm, kernel PCA, kernel CCA, kernel feature analysis, online kernel methods and a spectral clustering algorithm. Moreover it provides a general purpose quadratic programming solver, and an incomplete Cholesky decomposition method. } \Keywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, \proglang{S4}, \proglang{R}} \Plainkeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R} \begin{document} \section{Introduction} Machine learning is all about extracting structure from data, but it is often difficult to solve problems like classification, regression and clustering in the space in which the underlying observations have been made. Kernel-based learning methods use an implicit mapping of the input data into a high dimensional feature space defined by a kernel function, i.e., a function returning the inner product $ \langle \Phi(x),\Phi(y) \rangle$ between the images of two data points $x, y$ in the feature space. The learning then takes place in the feature space, provided the learning algorithm can be entirely rewritten so that the data points only appear inside dot products with other points. This is often referred to as the ``kernel trick'' \citep{kernlab:Schoelkopf+Smola:2002}. 
More precisely, if a projection $\Phi: X \rightarrow H$ is used, the dot product $\langle\Phi(x),\Phi(y)\rangle$ can be represented by a kernel function~$k$ \begin{equation} \label{eq:kernel} k(x,y)= \langle \Phi(x),\Phi(y) \rangle, \end{equation} which is computationally simpler than explicitly projecting $x$ and $y$ into the feature space~$H$. One interesting property of kernel-based systems is that, once a valid kernel function has been selected, one can practically work in spaces of any dimension without paying any computational cost, since feature mapping is never effectively performed. In fact, one does not even need to know which features are being used. Another advantage is that one can design and use a kernel for a particular problem that could be applied directly to the data without the need for a feature extraction process. This is particularly important in problems where a lot of structure of the data is lost by the feature extraction process (e.g., text processing). The inherent modularity of kernel-based learning methods allows one to use any valid kernel on a kernel-based algorithm. \subsection{Software review} The most prominent kernel based learning algorithm is without doubt the support vector machine (SVM), so the existence of many support vector machine packages comes as little surprise. 
Most of the existing SVM software is written in \proglang{C} or \proglang{C++}, e.g.\ the award winning \pkg{libsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/libsvm/}} \citep{kernlab:Chang+Lin:2001}, \pkg{SVMlight}\footnote{\url{http://svmlight.joachims.org}} \citep{kernlab:joachim:1999}, \pkg{SVMTorch}\footnote{\url{http://www.torch.ch}}, Royal Holloway Support Vector Machines\footnote{\url{http://svm.dcs.rhbnc.ac.uk}}, \pkg{mySVM}\footnote{\url{http://www-ai.cs.uni-dortmund.de/SOFTWARE/MYSVM/index.eng.html}}, and \pkg{M-SVM}\footnote{\url{http://www.loria.fr/~guermeur/}} with many packages providing interfaces to \proglang{MATLAB} (such as \pkg{libsvm}), and even some native \proglang{MATLAB} toolboxes\footnote{ \url{http://www.isis.ecs.soton.ac.uk/resources/svminfo/}}\,\footnote{ \url{http://asi.insa-rouen.fr/~arakotom/toolbox/index}}\,\footnote{ \url{http://www.cis.tugraz.at/igi/aschwaig/software.html}}. Putting SVM specific software aside and considering the abundance of other kernel-based algorithms published nowadays, there is little software available implementing a wider range of kernel methods with some exceptions like the \pkg{Spider}\footnote{\url{http://www.kyb.tuebingen.mpg.de/bs/people/spider/}} software which provides a \proglang{MATLAB} interface to various \proglang{C}/\proglang{C++} SVM libraries and \proglang{MATLAB} implementations of various kernel-based algorithms, \pkg{Torch} \footnote{\url{http://www.torch.ch}} which also includes more traditional machine learning algorithms, and the occasional \proglang{MATLAB} or \proglang{C} program found on a personal web page where an author includes code from a published paper. \subsection[R software]{\proglang{R} software} The \proglang{R} package \pkg{e1071} offers an interface to the award winning \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, a very efficient SVM implementation. 
\pkg{libsvm} provides a robust and fast SVM implementation and produces state of the art results on most classification and regression problems \citep{kernlab:Meyer+Leisch+Hornik:2003}. The \proglang{R} interface provided in \pkg{e1071} adds all standard \proglang{R} functionality like object orientation and formula interfaces to \pkg{libsvm}. Another SVM related \proglang{R} package which was made recently available is \pkg{klaR} \citep{kernlab:Roever:2004} which includes an interface to \pkg{SVMlight}, a popular SVM implementation along with other classification tools like Regularized Discriminant Analysis. However, most of the \pkg{libsvm} and \pkg{klaR} SVM code is in \proglang{C++}. Therefore, if one would like to extend or enhance the code with e.g.\ new kernels or different optimizers, one would have to modify the core \proglang{C++} code. \section[kernlab]{\pkg{kernlab}} \pkg{kernlab} aims to provide the \proglang{R} user with basic kernel functionality (e.g., like computing a kernel matrix using a particular kernel), along with some utility functions commonly used in kernel-based methods like a quadratic programming solver, and modern kernel-based algorithms based on the functionality that the package provides. Taking advantage of the inherent modularity of kernel-based methods, \pkg{kernlab} aims to allow the user to switch between kernels on an existing algorithm and even create and use own kernel functions for the kernel methods provided in the package. \subsection[S4 objects]{\proglang{S4} objects} \pkg{kernlab} uses \proglang{R}'s new object model described in ``Programming with Data'' \citep{kernlab:Chambers:1998} which is known as the \proglang{S4} class system and is implemented in the \pkg{methods} package. In contrast with the older \proglang{S3} model for objects in \proglang{R}, classes, slots, and methods relationships must be declared explicitly when using the \proglang{S4} system. 
The number and types of slots in an instance of a class have to be established at the time the class is defined. The objects from the class are validated against this definition and have to comply to it at any time. \proglang{S4} also requires formal declarations of methods, unlike the informal system of using function names to identify a certain method in \proglang{S3}. An \proglang{S4} method is declared by a call to \code{setMethod} along with the name and a ``signature'' of the arguments. The signature is used to identify the classes of one or more arguments of the method. Generic functions can be declared using the \code{setGeneric} function. Although such formal declarations require package authors to be more disciplined than when using the informal \proglang{S3} classes, they provide assurance that each object in a class has the required slots and that the names and classes of data in the slots are consistent. An example of a class used in \pkg{kernlab} is shown below. Typically, in a return object we want to include information on the result of the method along with additional information and parameters. Usually \pkg{kernlab}'s classes include slots for the kernel function used and the results and additional useful information. \begin{smallexample} setClass("specc", representation("vector", # the vector containing the cluster centers="matrix", # the cluster centers size="vector", # size of each cluster kernelf="function", # kernel function used withinss = "vector"), # within cluster sum of squares prototype = structure(.Data = vector(), centers = matrix(), size = matrix(), kernelf = ls, withinss = vector())) \end{smallexample} Accessor and assignment function are defined and used to access the content of each slot which can be also accessed with the \verb|@| operator. \subsection{Namespace} Namespaces were introduced in \proglang{R} 1.7.0 and provide a means for packages to control the way global variables and methods are being made available. 
Due to the number of assignment and accessor function involved, a namespace is used to control the methods which are being made visible outside the package. Since \proglang{S4} methods are being used, the \pkg{kernlab} namespace also imports methods and variables from the \pkg{methods} package. \subsection{Data} The \pkg{kernlab} package also includes data set which will be used to illustrate the methods included in the package. The \code{spam} data set \citep{kernlab:Hastie:2001} set collected at Hewlett-Packard Labs contains data on 2788 and 1813 e-mails classified as non-spam and spam, respectively. The 57 variables of each data vector indicate the frequency of certain words and characters in the e-mail. Another data set included in \pkg{kernlab}, the \code{income} data set \citep{kernlab:Hastie:2001}, is taken by a marketing survey in the San Francisco Bay concerning the income of shopping mall customers. It consists of 14 demographic attributes (nominal and ordinal variables) including the income and 8993 observations. The \code{ticdata} data set \citep{kernlab:Putten:2000} was used in the 2000 Coil Challenge and contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why? The \code{promotergene} is a data set of E. Coli promoter gene sequences (DNA) with 106 observations and 58 variables available at the UCI Machine Learning repository. Promoters have a region where a protein (RNA polymerase) must make contact and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters. 
The \code{spirals} data set was created by the \code{mlbench.spirals} function in the \pkg{mlbench} package \citep{kernlab:Leisch+Dimitriadou}. This two-dimensional data set with 300 data points consists of two spirals where Gaussian noise is added to each data point. \subsection{Kernels} A kernel function~$k$ calculates the inner product of two vectors $x$, $x'$ in a given feature mapping $\Phi: X \rightarrow H$. The notion of a kernel is obviously central in the making of any kernel-based algorithm and consequently also in any software package containing kernel-based methods. Kernels in \pkg{kernlab} are \proglang{S4} objects of class \code{kernel} extending the \code{function} class with one additional slot containing a list with the kernel hyper-parameters. Package \pkg{kernlab} includes 7 different kernel classes which all contain the class \code{kernel} and are used to implement the existing kernels. These classes are used in the function dispatch mechanism of the kernel utility functions described below. Existing kernel functions are initialized by ``creator'' functions. All kernel functions take two feature vectors as parameters and return the scalar dot product of the vectors. An example of the functionality of a kernel in \pkg{kernlab}: <>= ## create a RBF kernel function with sigma hyper-parameter 0.05 rbf <- rbfdot(sigma = 0.05) rbf ## create two random feature vectors x <- rnorm(10) y <- rnorm(10) ## compute dot product between x,y rbf(x, y) @ The package includes implementations of the following kernels: \begin{itemize} \item the linear \code{vanilladot} kernel implements the simplest of all kernel functions \begin{equation} k(x,x') = \langle x, x' \rangle \end{equation} which is useful specially when dealing with large sparse data vectors~$x$ as is usually the case in text categorization. 
\item the Gaussian radial basis function \code{rbfdot} \begin{equation} k(x,x') = \exp(-\sigma \|x - x'\|^2) \end{equation} which is a general purpose kernel and is typically used when no further prior knowledge is available about the data. \item the polynomial kernel \code{polydot} \begin{equation} k(x, x') = \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)^\mathrm{degree}. \end{equation} which is used in classification of images. \item the hyperbolic tangent kernel \code{tanhdot} \begin{equation} k(x, x') = \tanh \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right) \end{equation} which is mainly used as a proxy for neural networks. \item the Bessel function of the first kind kernel \code{besseldot} \begin{equation} k(x, x') = \frac{\mathrm{Bessel}_{(\nu+1)}^n(\sigma \|x - x'\|)} {(\|x-x'\|)^{-n(\nu+1)}}. \end{equation} is a general purpose kernel and is typically used when no further prior knowledge is available and mainly popular in the Gaussian process community. \item the Laplace radial basis kernel \code{laplacedot} \begin{equation} k(x, x') = \exp(-\sigma \|x - x'\|) \end{equation} which is a general purpose kernel and is typically used when no further prior knowledge is available. \item the ANOVA radial basis kernel \code{anovadot} performs well in multidimensional regression problems \begin{equation} k(x, x') = \left(\sum_{k=1}^{n}\exp(-\sigma(x^k-{x'}^k)^2)\right)^{d} \end{equation} where $x^k$ is the $k$th component of $x$. \end{itemize} \subsection{Kernel utility methods} The package also includes methods for computing commonly used kernel expressions (e.g., the Gram matrix). These methods are written in such a way that they take functions (i.e., kernels) and matrices (i.e., vectors of patterns) as arguments. These can be either the kernel functions already included in \pkg{kernlab} or any other function implementing a valid dot product (taking two vector arguments and returning a scalar). 
In case one of the already implemented kernels is used, the function calls a vectorized implementation of the corresponding function. Moreover, in the case of symmetric matrices (e.g., the dot product matrix of a Support Vector Machine) they only require one argument rather than having to pass the same matrix twice (for rows and columns). The computations for the kernels already available in the package are vectorized whenever possible which guarantees good performance and acceptable memory requirements. Users can define their own kernel by creating a function which takes two vectors as arguments (the data points) and returns a scalar (the dot product). This function can then be passed as an argument to the kernel utility methods. For a user defined kernel the dispatch mechanism calls a generic method implementation which calculates the expression by passing the kernel function through a pair of \code{for} loops. The kernel methods included are: \begin{description} \item[\code{kernelMatrix}] This is the most commonly used function. It computes $k(x, x')$, i.e., it computes the matrix $K$ where $K_{ij} = k(x_i, x_j)$ and $x$ is a \emph{row} vector. In particular, \begin{verbatim} K <- kernelMatrix(kernel, x) \end{verbatim} computes the matrix $K_{ij} = k(x_i, x_j)$ where the $x_i$ are the columns of $X$ and \begin{verbatim} K <- kernelMatrix(kernel, x1, x2) \end{verbatim} computes the matrix $K_{ij} = k(x1_i, x2_j)$. \item[\code{kernelFast}] This method is different to \code{kernelMatrix} for \code{rbfdot}, \code{besseldot}, and the \code{laplacedot} kernel, which are all RBF kernels. It is identical to \code{kernelMatrix}, except that it also requires the squared norm of the first argument as additional input. It is mainly used in kernel algorithms, where columns of the kernel matrix are computed per invocation. 
In these cases, evaluating the norm of each column-entry as it is done on a \code{kernelMatrix} invocation on an RBF kernel, over and over again would cause significant computational overhead. Its invocation is via \begin{verbatim} K = kernelFast(kernel, x1, x2, a) \end{verbatim} Here $a$ is a vector containing the squared norms of $x1$. \item[\code{kernelMult}] is a convenient way of computing kernel expansions. It returns the vector $f = (f(x_1), \dots, f(x_m))$ where \begin{equation} f(x_i) = \sum_{j=1}^{m} k(x_i, x_j) \alpha_j, \mbox{~hence~} f = K \alpha. \end{equation} The need for such a function arises from the fact that $K$ may sometimes be larger than the memory available. Therefore, it is convenient to compute $K$ only in stripes and discard the latter after the corresponding part of $K \alpha$ has been computed. The parameter \code{blocksize} determines the number of rows in the stripes. In particular, \begin{verbatim} f <- kernelMult(kernel, x, alpha) \end{verbatim} computes $f_i = \sum_{j=1}^m k(x_i, x_j) \alpha_j$ and \begin{verbatim} f <- kernelMult(kernel, x1, x2, alpha) \end{verbatim} computes $f_i = \sum_{j=1}^m k(x1_i, x2_j) \alpha_j$. \item[\code{kernelPol}] is a method very similar to \code{kernelMatrix} with the only difference that rather than computing $K_{ij} = k(x_i, x_j)$ it computes $K_{ij} = y_i y_j k(x_i, x_j)$. This means that \begin{verbatim} K <- kernelPol(kernel, x, y) \end{verbatim} computes the matrix $K_{ij} = y_i y_j k(x_i, x_j)$ where the $x_i$ are the columns of $x$ and $y_i$ are elements of the vector~$y$. Moreover, \begin{verbatim} K <- kernelPol(kernel, x1, x2, y1, y2) \end{verbatim} computes the matrix $K_{ij} = y1_i y2_j k(x1_i, x2_j)$. Both \code{x1} and \code{x2} may be matrices and \code{y1} and \code{y2} vectors. 
\end{description} An example using these functions : <>= ## create a RBF kernel function with sigma hyper-parameter 0.05 poly <- polydot(degree=2) ## create artificial data set x <- matrix(rnorm(60), 6, 10) y <- matrix(rnorm(40), 4, 10) ## compute kernel matrix kx <- kernelMatrix(poly, x) kxy <- kernelMatrix(poly, x, y) @ \section{Kernel methods} Providing a solid base for creating kernel-based methods is part of what we are trying to achieve with this package, the other being to provide a wider range of kernel-based methods in \proglang{R}. In the rest of the paper we present the kernel-based methods available in \pkg{kernlab}. All the methods in \pkg{kernlab} can be used with any of the kernels included in the package as well as with any valid user-defined kernel. User defined kernel functions can be passed to existing kernel-methods in the \code{kernel} argument. \subsection{Support vector machine} Support vector machines \citep{kernlab:Vapnik:1998} have gained prominence in the field of machine learning and pattern classification and regression. The solutions to classification and regression problems sought by kernel-based algorithms such as the SVM are linear functions in the feature space: \begin{equation} f(x) = w^\top \Phi(x) \end{equation} for some weight vector $w \in F$. The kernel trick can be exploited in this whenever the weight vector~$w$ can be expressed as a linear combination of the training points, $w = \sum_{i=1}^{n} \alpha_i \Phi(x_i)$, implying that $f$ can be written as \begin{equation} f(x) = \sum_{i=1}^{n}\alpha_i k(x_i, x) \end{equation} A very important issue that arises is that of choosing a kernel~$k$ for a given learning task. Intuitively, we wish to choose a kernel that induces the ``right'' metric in the space. Support Vector Machines choose a function $f$ that is linear in the feature space by optimizing some criterion over the sample. 
In the case of the 2-norm Soft Margin classification the optimization problem takes the form: \begin{eqnarray} \nonumber \mathrm{minimize} && t(w,\xi) = \frac{1}{2}{\|w\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\ \mbox{subject to~} && y_i ( \langle x_i , w \rangle +b ) \geq 1- \xi_i \qquad (i=1,\dots,m)\\ \nonumber && \xi_i \ge 0 \qquad (i=1,\dots, m) \end{eqnarray} Based on similar methodology, SVMs deal with the problem of novelty detection (or one class classification) and regression. \pkg{kernlab}'s implementation of support vector machines, \code{ksvm}, is based on the optimizers found in \pkg{bsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/bsvm}} \citep{kernlab:Hsu:2002} and \pkg{libsvm} \citep{kernlab:Chang+Lin:2001} which includes a very efficient version of the Sequential Minimization Optimization (SMO). SMO decomposes the SVM Quadratic Problem (QP) without using any numerical QP optimization steps. Instead, it chooses to solve the smallest possible optimization problem involving two elements of $\alpha_i$ because they must obey one linear equality constraint. At every step, SMO chooses two $\alpha_i$ to jointly optimize and finds the optimal values for these $\alpha_i$ analytically, thus avoiding numerical QP optimization, and updates the SVM to reflect the new optimal values. The SVM implementations available in \code{ksvm} include the C-SVM classification algorithm along with the $\nu$-SVM classification formulation which is equivalent to the former but has a more natural ($\nu$) model parameter taking values in $[0,1]$ and is proportional to the fraction of support vectors found in the data set and the training error. For classification problems which include more than two classes (multi-class) a one-against-one or pairwise classification method \citep{kernlab:Knerr:1990, kernlab:Kressel:1999} is used. This method constructs ${k \choose 2}$ classifiers where each one is trained on data from two classes. 
Prediction is done by voting where each classifier gives a prediction and the class which is predicted more often wins (``Max Wins''). This method has been shown to produce robust results when used with SVMs \citep{kernlab:Hsu2:2002}. Furthermore the \code{ksvm} implementation provides the ability to produce class probabilities as output instead of class labels. This is done by an improved implementation \citep{kernlab:Lin:2001} of Platt's posteriori probabilities \citep{kernlab:Platt:2000} where a sigmoid function \begin{equation} P(y=1\mid f) = \frac{1}{1+ e^{Af+B}} \end{equation} is fitted on the decision values~$f$ of the binary SVM classifiers, $A$ and $B$ are estimated by minimizing the negative log-likelihood function. To extend the class probabilities to the multi-class case, each binary classifier's class probability output is combined by the \code{couple} method which implements methods for combining class probabilities proposed in \citep{kernlab:Wu:2003}. In order to create a similar probability output for regression, following \cite{kernlab:Weng:2004}, we suppose that the SVM is trained on data from the model \begin{equation} y_i = f(x_i) + \delta_i \end{equation} where $f(x_i)$ is the underlying function and $\delta_i$ is independent and identical distributed random noise. Given a test data $x$ the distribution of $y$ given $x$ allows one to draw probabilistic inferences about $y$ e.g. one can construct a predictive interval $\Phi = \Phi(x)$ such that $y \in \Phi$ with a certain probability. If $\hat{f}$ is the estimated (predicted) function of the SVM on new data then $\eta = \eta(x) = y - \hat{f}(x)$ is the prediction error and $y \in \Phi$ is equivalent to $\eta \in \Phi $. Empirical observation shows that the distribution of the residuals $\eta$ can be modeled both by a Gaussian and a Laplacian distribution with zero mean. 
In this implementation the Laplacian with zero mean is used : \begin{equation} p(z) = \frac{1}{2\sigma}e^{-\frac{|z|}{\sigma}} \end{equation} Assuming that $\eta$ are independent the scale parameter $\sigma$ is estimated by maximizing the likelihood. The data for the estimation is produced by a three-fold cross-validation. For the Laplace distribution the maximum likelihood estimate is : \begin{equation} \sigma = \frac{\sum_{i=1}^m|\eta_i|}{m} \end{equation} Another approach for multi-class classification supported by the \code{ksvm} function is the one proposed in \cite{kernlab:Crammer:2000}. This algorithm works by solving a single optimization problem including the data from all classes: \begin{eqnarray} \nonumber \mathrm{minimize} && t(w_n,\xi) = \frac{1}{2}\sum_{n=1}^k{\|w_n\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\ \mbox{subject to~} && \langle x_i , w_{y_i} \rangle - \langle x_i , w_{n} \rangle \geq b_i^n - \xi_i \qquad (i=1,\dots,m) \\ \mbox{where} && b_i^n = 1 - \delta_{y_i,n} \end{eqnarray} where the decision function is \begin{equation} \mathrm{argmax}_{m=1,\dots,k} \langle x_i , w_{n} \rangle \end{equation} This optimization problem is solved by a decomposition method proposed in \cite{kernlab:Hsu:2002} where optimal working sets are found (that is, sets of $\alpha_i$ values which have a high probability of being non-zero). The QP sub-problems are then solved by a modified version of the \pkg{TRON}\footnote{\url{http://www-unix.mcs.anl.gov/~more/tron/}} \citep{kernlab:more:1999} optimization software. One-class classification or novelty detection \citep{kernlab:Williamson:1999, kernlab:Tax:1999}, where essentially an SVM detects outliers in a data set, is another algorithm supported by \code{ksvm}. SVM novelty detection works by creating a spherical decision boundary around a set of data points by a set of support vectors describing the spheres boundary. The $\nu$ parameter is used to control the volume of the sphere and consequently the number of outliers found. 
Again, the value of $\nu$ represents the fraction of outliers found. Furthermore, $\epsilon$-SVM \citep{kernlab:Vapnik2:1995} and $\nu$-SVM \citep{kernlab:Smola1:2000} regression are also available. The problem of model selection is partially addressed by an empirical observation for the popular Gaussian RBF kernel \citep{kernlab:Caputo:2002}, where the optimal values of the hyper-parameter of sigma are shown to lie in between the 0.1 and 0.9 quantile of the $\|x- x'\| $ statistics. The \code{sigest} function uses a sample of the training set to estimate the quantiles and returns a vector containing the values of the quantiles. Pretty much any value within this interval leads to good performance. An example for the \code{ksvm} function is shown below. <>= ## simple example using the promotergene data set data(promotergene) ## create test and training set tindex <- sample(1:dim(promotergene)[1],5) genetrain <- promotergene[-tindex, ] genetest <- promotergene[tindex,] ## train a support vector machine gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",kpar="automatic",C=60,cross=3,prob.model=TRUE) gene predict(gene, genetest) predict(gene, genetest, type="probabilities") @ \begin{figure} \centering <>= set.seed(123) x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) @ \caption{A contour plot of the SVM decision values for a toy binary classification problem using the \code{plot} function} \label{fig:ksvm Plot} \end{figure} \subsection{Relevance vector machine} The relevance vector machine \citep{kernlab:Tipping:2001} is a probabilistic sparse kernel model identical in functional form to the SVM making predictions based on a function of the form \begin{equation} y(x) = \sum_{n=1}^{N} \alpha_n K(\mathbf{x},\mathbf{x}_n) + a_0 \end{equation} where $\alpha_n$ are the model ``weights'' and $K(\cdotp,\cdotp)$ is a kernel function. 
It adopts a Bayesian approach to learning, by introducing a prior over the weights $\alpha$ \begin{equation} p(\alpha, \beta) = \prod_{i=1}^m N(\beta_i \mid 0 , a_i^{-1}) \mathrm{Gamma}(\beta_i\mid \beta_\beta , \alpha_\beta) \end{equation} governed by a set of hyper-parameters $\beta$, one associated with each weight, whose most probable values are iteratively estimated for the data. Sparsity is achieved because in practice the posterior distribution in many of the weights is sharply peaked around zero. Furthermore, unlike the SVM classifier, the non-zero weights in the RVM are not associated with examples close to the decision boundary, but rather appear to represent ``prototypical'' examples. These examples are termed \emph{relevance vectors}. \pkg{kernlab} currently has an implementation of the RVM based on a type~II maximum likelihood method which can be used for regression. The functions returns an \proglang{S4} object containing the model parameters along with indexes for the relevance vectors and the kernel function and hyper-parameters used. <>= x <- seq(-20, 20, 0.5) y <- sin(x)/x + rnorm(81, sd = 0.03) y[41] <- 1 @ <>= rvmm <- rvm(x, y,kernel="rbfdot",kpar=list(sigma=0.1)) rvmm ytest <- predict(rvmm, x) @ \begin{figure} \centering <>= plot(x, y, cex=0.5) lines(x, ytest, col = "red") points(x[RVindex(rvmm)],y[RVindex(rvmm)],pch=21) @ \caption{Relevance vector regression on data points created by the $sinc(x)$ function, relevance vectors are shown circled.} \label{fig:RVM sigmoid} \end{figure} \subsection{Gaussian processes} Gaussian processes \citep{kernlab:Williams:1995} are based on the ``prior'' assumption that adjacent observations should convey information about each other. In particular, it is assumed that the observed variables are normal, and that the coupling between them takes place by means of the covariance matrix of a normal distribution. 
Using the kernel matrix as the covariance matrix is a convenient way of extending Bayesian modeling of linear estimators to nonlinear situations. Furthermore it represents the counterpart of the ``kernel trick'' in methods minimizing the regularized risk. For regression estimation we assume that rather than observing $t(x_i)$ we observe $y_i = t(x_i) + \xi_i$ where $\xi_i$ is assumed to be independent Gaussian distributed noise with zero mean. The posterior distribution is given by \begin{equation} p(\mathbf{y}\mid \mathbf{t}) = \left[ \prod_ip(y_i - t(x_i)) \right] \frac{1}{\sqrt{(2\pi)^m \det(K)}} \exp \left(\frac{1}{2}\mathbf{t}^T K^{-1} \mathbf{t} \right) \end{equation} and after substituting $\mathbf{t} = K\mathbf{\alpha}$ and taking logarithms \begin{equation} \ln{p(\mathbf{\alpha} \mid \mathbf{y})} = - \frac{1}{2\sigma^2}\| \mathbf{y} - K \mathbf{\alpha} \|^2 -\frac{1}{2}\mathbf{\alpha}^T K \mathbf{\alpha} +c \end{equation} and maximizing $\ln{p(\mathbf{\alpha} \mid \mathbf{y})}$ for $\mathbf{\alpha}$ to obtain the maximum a posteriori approximation yields \begin{equation} \mathbf{\alpha} = (K + \sigma^2\mathbf{1})^{-1} \mathbf{y} \end{equation} Knowing $\mathbf{\alpha}$ allows for prediction of $y$ at a new location $x$ through $y = K(x,x_i){\mathbf{\alpha}}$. In similar fashion Gaussian processes can be used for classification. \code{gausspr} is the function in \pkg{kernlab} implementing Gaussian processes for classification and regression. \subsection{Ranking} The success of Google has vividly demonstrated the value of a good ranking algorithm in real world problems. \pkg{kernlab} includes a ranking algorithm based on work published in \citep{kernlab:Zhou:2003}. This algorithm exploits the geometric structure of the data in contrast to the more naive approach which uses the Euclidean distances or inner products of the data. 
Since real world data are usually highly structured, this algorithm should perform better than a simpler approach based on a Euclidean distance measure. First, a weighted network is defined on the data and an authoritative score is assigned to every point. The query points act as source nodes that continually pump their scores to the remaining points via the weighted network, and the remaining points further spread the score to their neighbors. The spreading process is repeated until convergence and the points are ranked according to the scores they received. Suppose we are given a set of data points $X = {x_1, \dots, x_{s}, x_{s+1}, \dots, x_{m}}$ in $\mathbf{R}^n$ where the first $s$ points are the query points and the rest are the points to be ranked. The algorithm works by connecting the two nearest points iteratively until a connected graph $G = (X, E)$ is obtained where $E$ is the set of edges. The affinity matrix $K$ defined e.g.\ by $K_{ij} = \exp(-\sigma\|x_i - x_j \|^2)$ if there is an edge $e(i,j) \in E$ and $0$ for the rest and diagonal elements. The matrix is normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$, and \begin{equation} f(t+1) = \alpha Lf(t) + (1 - \alpha)y \end{equation} is iterated until convergence, where $\alpha$ is a parameter in $[0,1)$. The points are then ranked according to their final scores $f_{i}(t_f)$. \pkg{kernlab} includes an \proglang{S4} method implementing the ranking algorithm. The algorithm can be used both with an edge-graph where the structure of the data is taken into account, and without which is equivalent to ranking the data by their distance in the projected space. 
\begin{figure} \centering <>= data(spirals) ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,] ranked <- ranking(ran, 54, kernel = "rbfdot", kpar = list(sigma = 100), edgegraph = TRUE) ranked[54, 2] <- max(ranked[-54, 2]) c<-1:86 op <- par(mfrow = c(1, 2),pty="s") plot(ran) plot(ran, cex=c[ranked[,3]]/40) @ \caption{The points on the left are ranked according to their similarity to the upper most left point. Points with a higher rank appear bigger. Instead of ranking the points on simple Euclidean distance the structure of the data is recognized and all points on the upper structure are given a higher rank although further away in distance than points in the lower structure.} \label{fig:Ranking} \end{figure} \subsection{Online learning with kernels} The \code{onlearn} function in \pkg{kernlab} implements the online kernel algorithms for classification, novelty detection and regression described in \citep{kernlab:Kivinen:2004}. In batch learning, it is typically assumed that all the examples are immediately available and are drawn independently from some distribution $P$. One natural measure of quality for some $f$ in that case is the expected risk \begin{equation} R[f,P] := E_{(x,y)\sim P}[l(f(x),y)] \end{equation} Since usually $P$ is unknown a standard approach is to instead minimize the empirical risk \begin{equation} R_{emp}[f,P] := \frac{1}{m}\sum_{t=1}^m l(f(x_t),y_t) \end{equation} Minimizing $R_{emp}[f]$ may lead to overfitting (complex functions that fit well on the training data but do not generalize to unseen data). One way to avoid this is to penalize complex functions by instead minimizing the regularized risk. \begin{equation} R_{reg}[f,S] := R_{reg,\lambda}[f,S] := R_{emp}[f] + \frac{\lambda}{2}\|f\|_{H}^2 \end{equation} where $\lambda > 0$ and $\|f\|_{H} = {\langle f,f \rangle}_{H}^{\frac{1}{2}}$ does indeed measure the complexity of $f$ in a sensible way. The constant $\lambda$ needs to be chosen appropriately for each problem. 
Since in online learning one is interested in dealing with one example at a time, the definition of an instantaneous regularized risk on a single example is needed \begin{equation} R_{inst}[f,x,y] := R_{inst,\lambda}[f,x,y] := R_{reg,\lambda}[f,((x,y))] \end{equation} The implemented algorithms are classical stochastic gradient descent algorithms performing gradient descent on the instantaneous risk. The general form of the update rule is: \begin{equation} f_{t+1} = f_t - \eta \partial_f R_{inst,\lambda}[f,x_t,y_t]|_{f=f_t} \end{equation} where $f_i \in H$ and $\partial_f$ is shorthand for $\partial / \partial f$ (the gradient with respect to $f$) and $\eta_t > 0$ is the learning rate. Due to the learning taking place in a \textit{reproducing kernel Hilbert space} $H$ the kernel $k$ used has the property $\langle f,k(x,\cdotp)\rangle_H = f(x)$ and therefore \begin{equation} \partial_f l(f(x_t),y_t) = l'(f(x_t),y_t)k(x_t,\cdotp) \end{equation} where $l'(z,y) := \partial_z l(z,y)$. Since $\partial_f\|f\|_H^2 = 2f$ the update becomes \begin{equation} f_{t+1} := (1 - \eta_t\lambda)f_t - \eta_t l'(f_t(x_t),y_t)k(x_t,\cdotp) \end{equation} The \code{onlearn} function implements the online learning algorithm for regression, classification and novelty detection. The online nature of the algorithm requires a different approach to the use of the function. An object is used to store the state of the algorithm at each iteration $t$; this object is passed to the function as an argument and is returned at each iteration $t+1$ containing the model parameter state at this step. An empty object of class \code{onlearn} is initialized using the \code{inlearn} function. 
<>= ## create toy data set x <- rbind(matrix(rnorm(90),,2),matrix(rnorm(90)+3,,2)) y <- matrix(c(rep(1,45),rep(-1,45)),,1) ## initialize onlearn object on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2),type="classification") ind <- sample(1:90,90) ## learn one data point at the time for(i in ind) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) sign(predict(on,x)) @ \subsection{Spectral clustering} Spectral clustering \citep{kernlab:Ng:2001} is a recently emerged promising alternative to common clustering algorithms. In this method one uses the top eigenvectors of a matrix created by some similarity measure to cluster the data. Similarly to the ranking algorithm, an affinity matrix is created out from the data as \begin{equation} K_{ij}=\exp(-\sigma\|x_i - x_j \|^2) \end{equation} and normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$. Then the top $k$ eigenvectors (where $k$ is the number of clusters to be found) of the affinity matrix are used to form an $n \times k$ matrix $Y$ where each column is normalized again to unit length. Treating each row of this matrix as a data point, \code{kmeans} is finally used to cluster the points. \pkg{kernlab} includes an \proglang{S4} method called \code{specc} implementing this algorithm which can be used through an formula interface or a matrix interface. The \proglang{S4} object returned by the method extends the class ``vector'' and contains the assigned cluster for each point along with information on the centers size and within-cluster sum of squares for each cluster. In case a Gaussian RBF kernel is being used a model selection process can be used to determine the optimal value of the $\sigma$ hyper-parameter. For a good value of $\sigma$ the values of $Y$ tend to cluster tightly and it turns out that the within cluster sum of squares is a good indicator for the ``quality'' of the sigma parameter found. We then iterate through the sigma values to find an optimal value for $\sigma$. 
\begin{figure} \centering <>= data(spirals) sc <- specc(spirals, centers=2) plot(spirals, pch=(23 - 2*sc)) @ \caption{Clustering the two spirals data set with \code{specc}} \label{fig:Spectral Clustering} \end{figure} \subsection{Kernel principal components analysis} Principal component analysis (PCA) is a powerful technique for extracting structure from possibly high-dimensional datasets. PCA is an orthogonal transformation of the coordinate system in which we describe the data. The new coordinates by which we represent the data are called principal components. Kernel PCA \citep{kernlab:Schoelkopf:1998} performs a nonlinear transformation of the coordinate system by finding principal components which are nonlinearly related to the input variables. Given a set of centered observations $x_k$, $k=1,\dots,M$, $x_k \in \mathbf{R}^N$, PCA diagonalizes the covariance matrix $C = \frac{1}{M}\sum_{j=1}^Mx_jx_{j}^T$ by solving the eigenvalue problem $\lambda\mathbf{v}=C\mathbf{v}$. The same computation can be done in a dot product space $F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$. Assuming that we deal with centered data and use the covariance matrix in $F$, \begin{equation} \hat{C}=\frac{1}{M}\sum_{j=1}^M \Phi(x_j)\Phi(x_j)^T \end{equation} the kernel principal components are then computed by taking the eigenvectors of the centered kernel matrix $K_{ij} = \langle \Phi(x_i),\Phi(x_j) \rangle$. \code{kpca}, the function implementing KPCA in \pkg{kernlab}, can be used both with a formula and a matrix interface, and returns an \proglang{S4} object of class \code{kpca} containing the principal components, the corresponding eigenvalues, along with the projection of the training data on the new coordinate system. Furthermore, the \code{predict} function can be used to embed new data points into the new coordinate system. 
\begin{figure} \centering <>= data(spam) train <- sample(1:dim(spam)[1],400) kpc <- kpca(~.,data=spam[train,-58],kernel="rbfdot",kpar=list(sigma=0.001),features=2) kpcv <- pcv(kpc) plot(rotated(kpc),col=as.integer(spam[train,58]),xlab="1st Principal Component",ylab="2nd Principal Component") @ \caption{Projection of the spam data on two kernel principal components using an RBF kernel} \label{fig:KPCA} \end{figure} \subsection{Kernel feature analysis} Whilst KPCA leads to very good results there are nevertheless some issues to be addressed. First the computational complexity of the standard version of KPCA, the algorithm scales $O(m^3)$ and secondly the resulting feature extractors are given as a dense expansion in terms of the of the training patterns. Sparse solutions are often achieved in supervised learning settings by using an $l_1$ penalty on the expansion coefficients. An algorithm can be derived using the same approach in feature extraction requiring only $n$ basis functions to compute the first $n$ feature. Kernel feature analysis \citep{kernlab:Olvi:2000} is computationally simple and scales approximately one order of magnitude better on large data sets than standard KPCA. Choosing $\Omega [f] = \sum_{i=1}^m |\alpha_i |$ this yields \begin{equation} F_{LP} = \{ \mathbf{w} \vert \mathbf{w} = \sum_{i=1}^m \alpha_i \Phi(x_i) \mathrm{with} \sum_{i=1}^m |\alpha_i | \leq 1 \} \end{equation} This setting leads to the first ``principal vector'' in the $l_1$ context \begin{equation} \mathbf{\nu}^1 = \mathrm{argmax}_{\mathbf{\nu} \in F_{LP}} \frac{1}{m} \sum_{i=1}^m \langle \mathbf{\nu},\mathbf{\Phi}(x_i) - \frac{1}{m}\sum_{j=1}^m\mathbf{\Phi}(x_i) \rangle^2 \end{equation} Subsequent ``principal vectors'' can be defined by enforcing optimality with respect to the remaining orthogonal subspaces. Due to the $l_1$ constrain the solution has the favorable property of being sparse in terms of the coefficients $\alpha_i$. 
The function \code{kfa} in \pkg{kernlab} implements Kernel Feature Analysis by using a projection pursuit technique on a sample of the data. Results are then returned in an \proglang{S4} object. \begin{figure} \centering <>= data(promotergene) f <- kfa(~.,data=promotergene,features=2,kernel="rbfdot",kpar=list(sigma=0.013)) plot(predict(f,promotergene),col=as.numeric(promotergene[,1]),xlab="1st Feature",ylab="2nd Feature") @ \caption{Projection of the spam data on two features using an RBF kernel} \label{fig:KFA} \end{figure} \subsection{Kernel canonical correlation analysis} Canonical correlation analysis (CCA) is concerned with describing the linear relations between variables. If we have two data sets $x_1$ and $x_2$, then the classical CCA attempts to find linear combination of the variables which give the maximum correlation between the combinations. I.e., if \begin{eqnarray*} && y_1 = \mathbf{w_1}\mathbf{x_1} = \sum_j w_1 x_{1j} \\ && y_2 = \mathbf{w_2}\mathbf{x_2} = \sum_j w_2 x_{2j} \end{eqnarray*} one wishes to find those values of $\mathbf{w_1}$ and $\mathbf{w_2}$ which maximize the correlation between $y_1$ and $y_2$. Similar to the KPCA algorithm, CCA can be extended and used in a dot product space~$F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$ as \begin{eqnarray*} && y_1 = \mathbf{w_1}\mathbf{\Phi(x_1)} = \sum_j w_1 \Phi(x_{1j}) \\ && y_2 = \mathbf{w_2}\mathbf{\Phi(x_2)} = \sum_j w_2 \Phi(x_{2j}) \end{eqnarray*} Following \citep{kernlab:kuss:2003}, the \pkg{kernlab} implementation of a KCCA projects the data vectors on a new coordinate system using KPCA and uses linear CCA to retrieve the correlation coefficients. The \code{kcca} method in \pkg{kernlab} returns an \proglang{S4} object containing the correlation coefficients for each data set and the corresponding correlation along with the kernel used. 
\subsection{Interior point code quadratic optimizer} In many kernel based algorithms, learning implies the minimization of some risk function. Typically we have to deal with quadratic or general convex problems for support vector machines of the type \begin{equation} \begin{array}{ll} \mathrm{minimize} & f(x) \\ \mbox{subject to~} & c_i(x) \leq 0 \mbox{~for all~} i \in [n]. \end{array} \end{equation} $f$ and $c_i$ are convex functions and $n \in \mathbf{N}$. \pkg{kernlab} provides the \proglang{S4} method \code{ipop} implementing an optimizer of the interior point family \citep{kernlab:Vanderbei:1999} which solves the quadratic programming problem \begin{equation} \begin{array}{ll} \mathrm{minimize} & c^\top x+\frac{1}{2}x^\top H x \\ \mbox{subject to~} & b \leq Ax \leq b + r\\ & l \leq x \leq u \\ \end{array} \end{equation} This optimizer can be used in regression, classification, and novelty detection in SVMs. \subsection{Incomplete cholesky decomposition} When dealing with kernel based algorithms, calculating a full kernel matrix should be avoided since it is already a $O(N^2)$ operation. Fortunately, the fact that kernel matrices are positive semidefinite is a strong constraint and good approximations can be found with small computational cost. The Cholesky decomposition factorizes a positive semidefinite $N \times N$ matrix $K$ as $K=ZZ^T$, where $Z$ is an upper triangular $N \times N$ matrix. Exploiting the fact that kernel matrices are usually of low rank, an \emph{incomplete Cholesky decomposition} \citep{kernlab:Wright:1999} finds a matrix $\tilde{Z}$ of size $N \times M$ where $M\ll N$ such that the norm of $K-\tilde{Z}\tilde{Z}^T$ is smaller than a given tolerance $\theta$. The main difference of incomplete Cholesky decomposition to the standard Cholesky decomposition is that pivots which are below a certain threshold are simply skipped. If $L$ is the number of skipped pivots, we obtain a $\tilde{Z}$ with only $M = N - L$ columns. 
The algorithm works by picking a column from $K$ to be added by maximizing a lower bound on the reduction of the error of the approximation. \pkg{kernlab} has an implementation of an incomplete Cholesky factorization called \code{inc.chol} which computes the decomposed matrix $\tilde{Z}$ from the original data for any given kernel without the need to compute a full kernel matrix beforehand. This has the advantage that no full kernel matrix has to be stored in memory. \section{Conclusions} In this paper we described \pkg{kernlab}, a flexible and extensible kernel methods package for \proglang{R} with existing modern kernel algorithms along with tools for constructing new kernel based algorithms. It provides a unified framework for using and creating kernel-based algorithms in \proglang{R} while using all of \proglang{R}'s modern facilities, like \proglang{S4} classes and namespaces. Our aim for the future is to extend the package and add more kernel-based methods as well as kernel relevant tools. Sources and binaries for the latest version of \pkg{kernlab} are available at CRAN\footnote{\url{http://CRAN.R-project.org}} under the GNU Public License. A shorter version of this introduction to the \proglang{R} package \pkg{kernlab} is published as \cite{kernlab:Karatzoglou+Smola+Hornik:2004} in the \emph{Journal of Statistical Software}. \bibliography{jss} \end{document} kernlab/src/0000755000175100001440000000000012774400037012501 5ustar hornikuserskernlab/src/iweightfactory.h0000644000175100001440000000323312774400037015703 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_WeightFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef I_WEIGHTFACTORY_H #define I_WEIGHTFACTORY_H #include "datatype.h" #include "errorcode.h" /// Weight Factory interface for string kernel class I_WeightFactory { public: /// Constructor I_WeightFactory(){} /// Destructor virtual ~I_WeightFactory(){} /// Compute edge weight between floor interval and the end of matched substring. virtual ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) = 0; }; #endif kernlab/src/stringkernel.h0000644000175100001440000000542612774400037015370 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). 
* Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/StringKernel.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 10 Aug 2006 #ifndef STRINGKERNEL_H #define STRINGKERNEL_H #include "datatype.h" #include "errorcode.h" #include "esa.h" #include "isafactory.h" #include "ilcpfactory.h" #include "iweightfactory.h" //#include "W_msufsort.h" #include "wkasailcp.h" #include "cweight.h" #include "expdecayweight.h" #include "brweight.h" #include "kspectrumweight.h" //' Types of substring weighting functions enum WeightFunction{CONSTANT, EXPDECAY, KSPECTRUM, BOUNDRANGE}; using namespace std; class StringKernel { public: /// Variables ESA *esa; I_WeightFactory *weigher; Real *val; //' val array. Storing precomputed val(t) values. Real *lvs; //' leaves array. Storing weights for leaves. 
/// Constructors StringKernel(); //' Given contructed suffix array StringKernel(ESA *esa_, int weightfn, Real param, int verb=INFO); //' Given text, build suffix array for it StringKernel(const UInt32 &size, SYMBOL *text, int weightfn, Real param, int verb=INFO); /// Destructor virtual ~StringKernel(); //' Methods /// Precompute the contribution of each intervals (or internal nodes) void PrecomputeVal(); /// Compute Kernel matrix void Compute_K(SYMBOL *xprime, const UInt32 &xprime_len, Real &value); /// Set leaves array, lvs[] void Set_Lvs(const Real *leafWeight, const UInt32 *len, const UInt32 &m); /// Set leaves array as lvs[i]=i for i=0 to esa->length void Set_Lvs(); private: int _verb; /// An iterative auxiliary function used in PrecomputeVal() void IterativeCompute(const UInt32 &left, const UInt32 &right); }; #endif kernlab/src/introsort.h0000644000175100001440000001560012774400037014717 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. 
Maniscalco * * ***** END LICENSE BLOCK ***** */ #ifndef TERNARY_INTRO_SORT_H #define TERNARY_INTRO_SORT_H //======================================================================// // Class: IntroSort // // // // Template based implementation of Introspective sorting algorithm // // using a ternary quicksort. // // // // Author: M.A. Maniscalco // // Date: January 20, 2005 // // // //======================================================================// // *** COMPILER WARNING DISABLED *** // Disable a warning which appears in MSVC // "conversion from '__w64 int' to ''" // Just plain annoying ... Restored at end of this file. #ifdef WIN32 #pragma warning (disable : 4244) #endif #define MIN_LENGTH_FOR_QUICKSORT 32 #define MAX_DEPTH_BEFORE_HEAPSORT 128 //===================================================================== // IntroSort class declaration // Notes: Any object used with this class must implement the following // the operators: <=, >=, == //===================================================================== template void IntroSort(T * array, unsigned int count); template void Partition(T * left, unsigned int count, unsigned int depth = 0); template T SelectPivot(T value1, T value2, T value3); template void Swap(T * valueA, T * valueB); template void InsertionSort(T * array, unsigned int count); template void HeapSort(T * array, int length); template void HeapSort(T * array, int k, int N); template inline void IntroSort(T * array, unsigned int count) { // Public method used to invoke the sort. // Call quick sort partition method if there are enough // elements to warrant it or insertion sort otherwise. if (count >= MIN_LENGTH_FOR_QUICKSORT) Partition(array, count); InsertionSort(array, count); } template inline void Swap(T * valueA, T * valueB) { // do the ol' "switch-a-me-do" on two values. T temp = *valueA; *valueA = *valueB; *valueB = temp; } template inline T SelectPivot(T value1, T value2, T value3) { // middle of three method. 
if (value1 < value2) return ((value2 < value3) ? value2 : (value1 < value3) ? value3 : value1); return ((value1 < value3) ? value1 : (value2 < value3) ? value3 : value2); } template inline void Partition(T * left, unsigned int count, unsigned int depth) { if (++depth > MAX_DEPTH_BEFORE_HEAPSORT) { // If enough recursion has happened then we bail to heap sort since it looks // as if we are experiencing a 'worst case' for quick sort. This should not // happen very often at all. HeapSort(left, count); return; } T * right = left + count - 1; T * startingLeft = left; T * startingRight = right; T * equalLeft = left; T * equalRight = right; // select the pivot value. T pivot = SelectPivot(left[0], right[0], left[((right - left) >> 1)]); // do three way partitioning. do { while ((left < right) && (*left <= pivot)) if (*(left++) == pivot) Swap(equalLeft++, left - 1); // equal to pivot value. move to far left. while ((left < right) && (*right >= pivot)) if (*(right--) == pivot) Swap(equalRight--, right + 1); // equal to pivot value. move to far right. if (left >= right) { if (left == right) { if (*left >= pivot) left--; if (*right <= pivot) right++; } else { left--; right++; } break; // done partitioning } // left and right are ready for swaping Swap(left++, right--); } while (true); // move values that were equal to pivot from the far left into the middle. // these values are now placed in their final sorted position. if (equalLeft > startingLeft) while (equalLeft > startingLeft) Swap(--equalLeft, left--); // move values that were equal to pivot from the far right into the middle. // these values are now placed in their final sorted position. if (equalRight < startingRight) while (equalRight < startingRight) Swap(++equalRight, right++); // Calculate new partition sizes ... 
unsigned int leftSize = left - startingLeft + 1; unsigned int rightSize = startingRight - right + 1; // Partition left (less than pivot) if there are enough values to warrant it // otherwise do insertion sort on the values. if (leftSize >= MIN_LENGTH_FOR_QUICKSORT) Partition(startingLeft, leftSize, depth); // Partition right (greater than pivot) if there are enough values to warrant it // otherwise do insertion sort on the values. if (rightSize >= MIN_LENGTH_FOR_QUICKSORT) Partition(right, rightSize, depth); } template inline void InsertionSort(T * array, unsigned int count) { // A basic insertion sort. if (count < 3) { if ((count == 2) && (array[0] > array[1])) Swap(array, array + 1); return; } T * ptr2, * ptr3 = array + 1, * ptr4 = array + count; if (array[0] > array[1]) Swap(array, array + 1); while (true) { while ((++ptr3 < ptr4) && (ptr3[0] >= ptr3[-1])); if (ptr3 >= ptr4) break; if (ptr3[-2] <= ptr3[0]) { if (ptr3[-1] > ptr3[0]) Swap(ptr3, ptr3 - 1); } else { ptr2 = ptr3 - 1; T v = *ptr3; while ((ptr2 >= array) && (ptr2[0] > v)) { ptr2[1] = ptr2[0]; ptr2--; } ptr2[1] = v; } } } template inline void HeapSort(T * array, int length) { // A basic heapsort. for (int k = length >> 1; k > 0; k--) HeapSort(array, k, length); do { Swap(array, array + (--length)); HeapSort(array, 1, length); } while (length > 1); } template inline void HeapSort(T * array, int k, int N) { // A basic heapsort. T temp = array[k - 1]; int n = N >> 1; int j = (k << 1); while (k <= n) { if ((j < N) && (array[j - 1] < array[j])) j++; if (temp >= array[j - 1]) break; else { array[k - 1] = array[j - 1]; k = j; j <<= 1; } } array[k - 1] = temp; } // Restore the default warning which appears in MSVC for // warning #4244 which was disabled at top of this file. 
#ifdef WIN32 #pragma warning (default : 4244) #endif #endif kernlab/src/Makevars0000644000175100001440000000006012774400037014171 0ustar hornikusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) kernlab/src/dbreakpt.c0000644000175100001440000000417112774400037014444 0ustar hornikusersextern double mymin(double, double); extern double mymax(double, double); void dbreakpt(int n, double *x, double *xl, double *xu, double *w, int *nbrpt, double *brptmin, double *brptmax) { /* c ********** c c Subroutine dbreakpt c c This subroutine computes the number of break-points, and c the minimal and maximal break-points of the projection of c x + alpha*w on the n-dimensional interval [xl,xu]. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c w is a double precision array of dimension n. c On entry w specifies the vector w. c On exit w is unchanged. c c nbrpt is an integer variable. c On entry nbrpt need not be specified. c On exit nbrpt is the number of break points. c c brptmin is a double precision variable c On entry brptmin need not be specified. c On exit brptmin is minimal break-point. c c brptmax is a double precision variable c On entry brptmax need not be specified. c On exit brptmax is maximal break-point. 
c c ********** */ int i; double brpt; *nbrpt = 0; for (i=0;i 0) { (*nbrpt)++; brpt = (xu[i] - x[i])/w[i]; if (*nbrpt == 1) *brptmin = *brptmax = brpt; else { *brptmin = mymin(brpt, *brptmin); *brptmax = mymax(brpt, *brptmax); } } else if (x[i] > xl[i] && w[i] < 0) { (*nbrpt)++; brpt = (xl[i] - x[i])/w[i]; if (*nbrpt == 1) *brptmin = *brptmax = brpt; else { *brptmin = mymin(brpt, *brptmin); *brptmax = mymax(brpt, *brptmax); } } if (*nbrpt == 0) *brptmin = *brptmax = 0; } kernlab/src/kspectrumweight.h0000644000175100001440000000326212774400037016102 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/KSpectrumWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef KSPECTRUMWEIGHT_H #define KSPECTRUMWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include //' K-spectrum weight class class KSpectrumWeight : public I_WeightFactory { Real k; public: /// Constructor KSpectrumWeight(const Real & k_=5.0):k(k_) {} /// Destructor virtual ~KSpectrumWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/src/dtron.c0000644000175100001440000001705712774400037014005 0ustar hornikusers#include #include #include #include #include extern void *xmalloc(size_t); extern double mymin(double, double); extern double mymax(double, double); extern int ufv(int, double *, double *); extern int ugrad(int, double *, double *); extern int uhes(int, double *, double **); /* LEVEL 1 BLAS */ /*extern double dnrm2_(int *, double *, int *);*/ /*extern double ddot_(int *, double *, int *, double *, int *);*/ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /* MINPACK 2 */ extern double dgpnrm(int, double *, double *, double *, double *); extern void dcauchy(int, double *, double *, double *, double *, double *, double, double *, double *, double *); extern void dspcg(int, double *, double *, double *, double *, double *, double, double, double *, int *); void dtron(int n, double *x, double *xl, double *xu, double gtol, double frtol, double fatol, double fmin, int maxfev, double cgtol) { /* c ********* c c Subroutine dtron c c The optimization problem of BSVM is a bound-constrained quadratic c optimization problem and its Hessian matrix is positive 
semidefinite. c We modified the optimization solver TRON by Chih-Jen Lin and c Jorge More' into this version which is suitable for this c special case. c c This subroutine implements a trust region Newton method for the c solution of large bound-constrained quadratic optimization problems c c min { f(x)=0.5*x'*A*x + g0'*x : xl <= x <= xu } c c where the Hessian matrix A is dense and positive semidefinite. The c user must define functions which evaluate the function, gradient, c and the Hessian matrix. c c The user must choose an initial approximation x to the minimizer, c lower bounds, upper bounds, quadratic terms, linear terms, and c constants about termination criterion. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is the final minimizer. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c gtol is a double precision variable. c On entry gtol specifies the relative error of the projected c gradient. c On exit gtol is unchanged. c c frtol is a double precision variable. c On entry frtol specifies the relative error desired in the c function. Convergence occurs if the estimate of the c relative error between f(x) and f(xsol), where xsol c is a local minimizer, is less than frtol. c On exit frtol is unchanged. c c fatol is a double precision variable. c On entry fatol specifies the absolute error desired in the c function. Convergence occurs if the estimate of the c absolute error between f(x) and f(xsol), where xsol c is a local minimizer, is less than fatol. c On exit fatol is unchanged. c c fmin is a double precision variable. c On entry fmin specifies a lower bound for the function. 
c The subroutine exits with a warning if f < fmin. c On exit fmin is unchanged. c c maxfev is an integer variable. c On entry maxfev specifies the limit of function evaluations. c On exit maxfev is unchanged. c c cgtol is a double precision variable. c On entry gqttol specifies the convergence criteria for c subproblems. c On exit gqttol is unchanged. c c ********** */ /* Parameters for updating the iterates. */ double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75; /* Parameters for updating the trust region size delta. */ double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4; double p5 = 0.5, one = 1; double gnorm, gnorm0, delta, snorm; double alphac = 1, alpha, f, fc, prered, actred, gs; int search = 1, iter = 1, info, inc = 1; double *xc = (double *) xmalloc(sizeof(double)*n); double *s = (double *) xmalloc(sizeof(double)*n); double *wa = (double *) xmalloc(sizeof(double)*n); double *g = (double *) xmalloc(sizeof(double)*n); double *A = NULL; uhes(n, x, &A); ugrad(n, x, g); ufv(n, x, &f); gnorm0 = F77_CALL(dnrm2)(&n, g, &inc); delta = 1000*gnorm0; gnorm = dgpnrm(n, x, xl, xu, g); if (gnorm <= gtol*gnorm0) { /* //printf("CONVERGENCE: GTOL TEST SATISFIED\n"); */ search = 0; } while (search) { /* Save the best function value and the best x. */ fc = f; memcpy(xc, x, sizeof(double)*n); /* Compute the Cauchy step and store in s. */ dcauchy(n, x, xl, xu, A, g, delta, &alphac, s, wa); /* Compute the projected Newton step. */ dspcg(n, x, xl, xu, A, g, delta, cgtol, s, &info); if (ufv(n, x, &f) > maxfev) { /* //printf("ERROR: NFEV > MAXFEV\n"); */ search = 0; continue; } /* Compute the predicted reduction. */ memcpy(wa, g, sizeof(double)*n); F77_CALL(dsymv)("U", &n, &p5, A, &n, s, &inc, &one, wa, &inc); prered = -F77_CALL(ddot)(&n, s, &inc, wa, &inc); /* Compute the actual reduction. */ actred = fc - f; /* On the first iteration, adjust the initial step bound. 
*/ snorm = F77_CALL(dnrm2)(&n, s, &inc); if (iter == 1) delta = mymin(delta, snorm); /* Compute prediction alpha*snorm of the step. */ gs = F77_CALL(ddot)(&n, g, &inc, s, &inc); if (f - fc - gs <= 0) alpha = sigma3; else alpha = mymax(sigma1, -0.5*(gs/(f - fc - gs))); /* Update the trust region bound according to the ratio of actual to predicted reduction. */ if (actred < eta0*prered) /* Reduce delta. Step is not successful. */ delta = mymin(mymax(alpha, sigma1)*snorm, sigma2*delta); else { if (actred < eta1*prered) /* Reduce delta. Step is not sufficiently successful. */ delta = mymax(sigma1*delta, mymin(alpha*snorm, sigma2*delta)); else if (actred < eta2*prered) /* The ratio of actual to predicted reduction is in the interval (eta1,eta2). We are allowed to either increase or decrease delta. */ delta = mymax(sigma1*delta, mymin(alpha*snorm, sigma3*delta)); else /* The ratio of actual to predicted reduction exceeds eta2. Do not decrease delta. */ delta = mymax(delta, mymin(alpha*snorm, sigma3*delta)); } /* Update the iterate. */ if (actred > eta0*prered) { /* Successful iterate. */ iter++; /* uhes(n, x, &A); */ ugrad(n, x, g); gnorm = dgpnrm(n, x, xl, xu, g); if (gnorm <= gtol*gnorm0) { /* //printf("CONVERGENCE: GTOL = %g TEST SATISFIED\n", gnorm/gnorm0); */ search = 0; continue; } } else { /* Unsuccessful iterate. */ memcpy(x, xc, sizeof(double)*n); f = fc; } /* Test for convergence */ if (f < fmin) { //printf("WARNING: F .LT. 
FMIN\n"); search = 0; /* warning */ continue; } if (fabs(actred) <= fatol && prered <= fatol) { //printf("CONVERGENCE: FATOL TEST SATISFIED\n"); search = 0; continue; } if (fabs(actred) <= frtol*fabs(f) && prered <= frtol*fabs(f)) { /* //printf("CONVERGENCE: FRTOL TEST SATISFIED\n"); */ search = 0; continue; } } free(g); free(xc); free(s); free(wa); } kernlab/src/wkasailcp.cpp0000644000175100001440000000452112774400037015165 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_kasai_lcp.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef W_KASAI_LCP_CPP #define W_KASAI_LCP_CPP #include "wkasailcp.h" #include /** * Compute LCP array. Algorithm adapted from Manzini's SWAT2004 paper. * Modification: array indexing changed from 1-based to 0-based. * * \param text - (IN) The text which corresponds to SA. * \param len - (IN) Length of text. * \param sa - (IN) Suffix array. * \param lcp - (OUT) Computed LCP array. 
*/ ErrorCode W_kasai_lcp::ComputeLCP(const SYMBOL *text, const UInt32 &len, const UInt32 *sa, LCP& lcp) { //chteo: [111006:0141] //std::vector isa(len); UInt32 *isa = new UInt32[len]; //' Step 1: Compute inverse suffix array for(UInt32 i=0; i0) h--; } //chteo: [111006:0141] delete [] isa; isa = 0; return NOERROR; } #endif kernlab/src/dgpnrm.c0000644000175100001440000000217212774400037014136 0ustar hornikusers#include double dgpnrm(int n, double *x, double *xl, double *xu, double *g) { /* c ********** c c Function dgpnrm c c This function computes the infinite norm of the c projected gradient at x. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c g is a double precision array of dimension n. c On entry g specifies the gradient g. c On exit g is unchanged. 
c c ********** */ int i; double norm = 0; for (i=0;i= 0 && x[i] == xl[i]))) if (fabs(g[i]) > norm) norm = fabs(g[i]); return norm; } kernlab/src/dtrpcg.c0000644000175100001440000001515112774400037014133 0ustar hornikusers#include #include #include #include extern void *xmalloc(size_t); /* LEVEL 1 BLAS */ /* extern int daxpy_(int *, double *, double *, int *, double *, int *); */ /* extern double ddot_(int *, double *, int *, double *, int *); */ /* extern double dnrm2_(int *, double *, int *); */ /* extern int dscal_(int *, double *, double *, int *); */ /* LEVEL 2 BLAS */ /* extern int dtrsv_(char *, char *, char *, int *, double *, int *, double *, int *); */ /* extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); */ /* MINPACK 2 */ extern void dtrqsol(int, double *, double *, double , double *); void dtrpcg(int n, double *A, double *g, double delta, double *L, double tol, double stol, double *w, int *iters, int *info) { /* c ********* c c Subroutine dtrpcg c c Given a dense symmetric positive semidefinite matrix A, this c subroutine uses a preconditioned conjugate gradient method to find c an approximate minimizer of the trust region subproblem c c min { q(s) : || L'*s || <= delta }. c c where q is the quadratic c c q(s) = 0.5*s'*A*s + g'*s, c c This subroutine generates the conjugate gradient iterates for c the equivalent problem c c min { Q(w) : || w || <= delta }. c c where Q is the quadratic defined by c c Q(w) = q(s), w = L'*s. c c Termination occurs if the conjugate gradient iterates leave c the trust region, a negative curvature direction is generated, c or one of the following two convergence tests is satisfied. c c Convergence in the original variables: c c || grad q(s) || <= tol c c Convergence in the scaled variables: c c || grad Q(w) || <= stol c c Note that if w = L'*s, then L*grad Q(w) = grad q(s). c c parameters: c c n is an integer variable. c On entry n is the number of variables. 
c On exit n is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A. c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g must contain the vector g. c On exit g is unchanged. c c delta is a double precision variable. c On entry delta is the trust region size. c On exit delta is unchanged. c c L is a double precision array of dimension n*n. c On entry L need not to be specified. c On exit the lower triangular part of L contains the matrix L. c c tol is a double precision variable. c On entry tol specifies the convergence test c in the un-scaled variables. c On exit tol is unchanged c c stol is a double precision variable. c On entry stol specifies the convergence test c in the scaled variables. c On exit stol is unchanged c c w is a double precision array of dimension n. c On entry w need not be specified. c On exit w contains the final conjugate gradient iterate. c c iters is an integer variable. c On entry iters need not be specified. c On exit iters is set to the number of conjugate c gradient iterations. c c info is an integer variable. c On entry info need not be specified. c On exit info is set as follows: c c info = 1 Convergence in the original variables. c || grad q(s) || <= tol c c info = 2 Convergence in the scaled variables. c || grad Q(w) || <= stol c c info = 3 Negative curvature direction generated. c In this case || w || = delta and a direction c c of negative curvature w can be recovered by c solving L'*w = p. c c info = 4 Conjugate gradient iterates exit the c trust region. In this case || w || = delta. c c info = 5 Failure to converge within itermax(n) iterations. 
c c ********** */ int i, inc = 1; double one = 1, zero = 0, alpha, malpha, beta, ptq, rho; double *p, *q, *t, *r, *z, sigma, rtr, rnorm, rnorm0, tnorm; p = (double *) xmalloc(sizeof(double)*n); q = (double *) xmalloc(sizeof(double)*n); t = (double *) xmalloc(sizeof(double)*n); r = (double *) xmalloc(sizeof(double)*n); z = (double *) xmalloc(sizeof(double)*n); /* Initialize the iterate w and the residual r. Initialize the residual t of grad q to -g. Initialize the residual r of grad Q by solving L*r = -g. Note that t = L*r. */ for (i=0;i 0) alpha = rho/ptq; else alpha = 0; dtrqsol(n, w, p, delta, &sigma); /* Exit if there is negative curvature or if the iterates exit the trust region. */ if (ptq <= 0 || alpha >= sigma) { F77_CALL(daxpy)(&n, &sigma, p, &inc, w, &inc); if (ptq <= 0) *info = 3; else *info = 4; goto return0; } /* Update w and the residuals r and t. Note that t = L*r. */ malpha = -alpha; F77_CALL(daxpy)(&n, &alpha, p, &inc, w, &inc); F77_CALL(daxpy)(&n, &malpha, q, &inc, r, &inc); F77_CALL(daxpy)(&n, &malpha, z, &inc, t,&inc); /* Exit if the residual convergence test is satisfied. */ rtr = F77_CALL(ddot)(&n, r, &inc, r, &inc); rnorm = sqrt(rtr); tnorm = sqrt(F77_CALL(ddot)(&n, t, &inc, t, &inc)); if (tnorm <= tol) { *info = 1; goto return0; } if (rnorm <= stol) { *info = 2; goto return0; } /* Compute p = r + beta*p and update rho. */ beta = rtr/rho; F77_CALL(dscal)(&n, &beta, p, &inc); F77_CALL(daxpy)(&n, &one, r, &inc, p, &inc); rho = rtr; } /* iters > itermax = n */ *info = 5; return0: free(p); free(q); free(r); free(t); free(z); } kernlab/src/inductionsort.h0000644000175100001440000000554312774400037015565 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
* * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #ifndef MSUFSORT_INDUCTION_SORTING_H #define MSUFSORT_INDUCTION_SORTING_H #include "introsort.h" class InductionSortObject { public: InductionSortObject(unsigned int inductionPosition = 0, unsigned int inductionValue = 0, unsigned int suffixIndex = 0); bool operator <= (InductionSortObject & object); bool operator == (InductionSortObject & object); InductionSortObject& operator = (InductionSortObject & object); bool operator >= (InductionSortObject & object); bool operator > (InductionSortObject & object); bool operator < (InductionSortObject & object); unsigned int m_sortValue[2]; }; inline bool InductionSortObject::operator <= (InductionSortObject & object) { if (m_sortValue[0] < object.m_sortValue[0]) return true; else if (m_sortValue[0] == object.m_sortValue[0]) return (m_sortValue[1] <= object.m_sortValue[1]); return false; } inline bool InductionSortObject::operator == (InductionSortObject & object) { return ((m_sortValue[0] == object.m_sortValue[0]) && (m_sortValue[1] == object.m_sortValue[1])); } inline bool InductionSortObject::operator >= (InductionSortObject & object) { if (m_sortValue[0] > object.m_sortValue[0]) return true; else if (m_sortValue[0] == object.m_sortValue[0]) return (m_sortValue[1] >= object.m_sortValue[1]); return false; } inline InductionSortObject & InductionSortObject::operator = (InductionSortObject & object) { m_sortValue[0] = object.m_sortValue[0]; 
m_sortValue[1] = object.m_sortValue[1]; return *this; } inline bool InductionSortObject::operator > (InductionSortObject & object) { if (m_sortValue[0] > object.m_sortValue[0]) return true; else if (m_sortValue[0] == object.m_sortValue[0]) return (m_sortValue[1] > object.m_sortValue[1]); return false; } inline bool InductionSortObject::operator < (InductionSortObject & object) { if (m_sortValue[0] < object.m_sortValue[0]) return true; else if (m_sortValue[0] == object.m_sortValue[0]) return (m_sortValue[1] < object.m_sortValue[1]); return false; } #endif kernlab/src/ctable.cpp0000644000175100001440000000661712774400037014451 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ChildTable.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef CTABLE_CPP #define CTABLE_CPP #include "ctable.h" #include /** * Return the value of idx-th "up" field of child table. * val = childtab[idx -1]; * * \param idx - (IN) The index of child table. 
* \param val - (OUT) The value of idx-th entry in child table's "up" field. */ ErrorCode ChildTable::up(const UInt32 &idx, UInt32 &val){ if(idx == size()) { // Special case: To get the first 0-index val = (*this)[idx-1]; return NOERROR; } // svnvish: BUGBUG // Do we need to this in production code? UInt32 lcp_idx = 0, lcp_prev_idx = 0; lcp_idx = _lcptab[idx]; lcp_prev_idx = _lcptab[idx-1]; assert(lcp_prev_idx > lcp_idx); val = (*this)[idx-1]; return NOERROR; } /** * Return the value of idx-th "down" field of child table. Deprecated. * Instead use val = childtab[idx]; * * \param idx - (IN) The index of child table. * \param val - (OUT) The value of idx-th entry in child table's "down" field. */ ErrorCode ChildTable::down(const UInt32 &idx, UInt32 &val){ // For a l-interval, l-[i..j], childtab[i].down == childtab[j+1].up // If l-[i..j] is last child-interval of its parent OR 0-[0..n], // childtab[i].nextlIndex == childtab[i].down // svnvish: BUGBUG // Do we need to this in production code? // UInt32 lcp_idx = 0, lcp_nextidx = 0; // lcp_nextidx = _lcptab[(*this)[idx]]; // lcp_idx = _lcptab[idx]; // assert(lcp_nextidx > lcp_idx); // childtab[i].down := childtab[i].nextlIndex val = (*this)[idx]; return NOERROR; } /** * Return the first l-index of a given l-[i..j] interval. * * \param i - (IN) Left bound of l-[i..j] * \param j - (IN) Right bound of l-[i..j] * \param idx - (OUT) The first l-index. */ ErrorCode ChildTable::l_idx(const UInt32 &i, const UInt32 &j, UInt32 &idx){ UInt32 up = (*this)[j]; if(i < up && up <= j){ idx = up; }else { idx = (*this)[i]; } return NOERROR; } /** * Dump array elements to output stream * * \param os - (IN) Output stream. * \param ct - (IN) ChildTable object. 
*/ std::ostream& operator << (std::ostream& os, const ChildTable& ct){ for( UInt32 i = 0; i < ct.size(); i++ ){ os << "ct[ " << i << "]: " << ct[i] << std::endl; } return os; } #endif kernlab/src/misc.c0000644000175100001440000000055312774400037013603 0ustar hornikusers#include #include void *xmalloc(size_t size) { void *ptr = (void *) malloc(size); return ptr; } double mymax(double a, double b) { if (a > b) return a; return b; } double mymin(double a, double b) { if (a < b) return a; return b; } double sign(double a, double b) { if (b >= 0) return fabs(a); return -fabs(a); } kernlab/src/expdecayweight.h0000644000175100001440000000342012774400037015663 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ExpDecayWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef EXPDECAYWEIGHT_H #define EXPDECAYWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include class ExpDecayWeight : public I_WeightFactory { public: Real lambda; /// Constructors //' NOTE: lambda shouldn't be equal to 1, othexrwise there will be //' divide-by-zero error. ExpDecayWeight(const Real &lambda_=2.0):lambda(lambda_) {} /// Destructor virtual ~ExpDecayWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/src/dcauchy.c0000644000175100001440000001147612774400037014276 0ustar hornikusers#include #include extern void *xmalloc(size_t); /* LEVEL 1 BLAS */ /* extern double ddot_(int *, double *, int *, double *, int *); extern double dnrm2_(int *, double *, int *); */ /* LEVEL 2 BLAS */ /* extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); */ /* MINPACK 2 */ extern void dbreakpt(int, double *, double *, double *, double *, int *, double *, double *); extern void dgpstep(int, double *, double *, double *, double, double *, double *); void dcauchy(int n, double *x, double *xl, double *xu, double *A, double *g, double delta, double *alpha, double *s) { /* c ********** c c Subroutine dcauchy c c This subroutine computes a Cauchy step that satisfies a trust c region constraint and a sufficient decrease condition. c c The Cauchy step is computed for the quadratic c c q(s) = 0.5*s'*A*s + g'*s, c c where A is a symmetric matrix , and g is a vector. 
Given a c parameter alpha, the Cauchy step is c c s[alpha] = P[x - alpha*g] - x, c c with P the projection onto the n-dimensional interval [xl,xu]. c The Cauchy step satisfies the trust region constraint and the c sufficient decrease condition c c || s || <= delta, q(s) <= mu_0*(g'*s), c c where mu_0 is a constant in (0,1). c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A. c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g specifies the gradient g. c On exit g is unchanged. c c delta is a double precision variable. c On entry delta is the trust region size. c On exit delta is unchanged. c c alpha is a double precision variable. c On entry alpha is the current estimate of the step. c On exit alpha defines the Cauchy step s[alpha]. c c s is a double precision array of dimension n. c On entry s need not be specified. c On exit s is the Cauchy step s[alpha]. c c ********** */ double one = 1, zero = 0; /* Constant that defines sufficient decrease. Interpolation and extrapolation factors. */ double mu0 = 0.01, interpf = 0.1, extrapf = 10; int search, interp, nbrpt, nsteps = 1, i, inc = 1; double alphas, brptmax, brptmin, gts, q; double *wa = (double *) xmalloc(sizeof(double)*n); /* Find the minimal and maximal break-point on x - alpha*g. 
*/ for (i=0;i delta) interp = 1; else { F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc); gts = F77_CALL(ddot)(&n, g, &inc, s, &inc); q = 0.5*F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts; interp = q >= mu0*gts ? 1 : 0; } /* Either interpolate or extrapolate to find a successful step. */ if (interp) { /* Reduce alpha until a successful step is found. */ search = 1; while (search) { /* This is a crude interpolation procedure that will be replaced in future versions of the code. */ nsteps++; (*alpha) *= interpf; dgpstep(n, x, xl, xu, -(*alpha), g, s); if (F77_CALL(dnrm2)(&n, s, &inc) <= delta) { F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc); gts = F77_CALL(ddot)(&n, g, &inc, s, &inc); q = 0.5 * F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts; search = q > mu0*gts ? 1 : 0; } } } else { search = 1; alphas = *alpha; /* Increase alpha until a successful step is found. */ while (search && (*alpha) <= brptmax) { /* This is a crude extrapolation procedure that will be replaced in future versions of the code. */ nsteps++; alphas = *alpha; (*alpha) *= extrapf; dgpstep(n, x, xl, xu, -(*alpha), g, s); if (F77_CALL(dnrm2)(&n, s, &inc) <= delta) { F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, wa, &inc); gts = F77_CALL(ddot)(&n, g, &inc, s, &inc); q = 0.5 * F77_CALL(ddot)(&n, s, &inc, wa, &inc) + gts; search = q < mu0*gts ? 1 : 0; } else search = 0; } *alpha = alphas; dgpstep(n, x, xl, xu, -(*alpha), g, s); } free(wa); } kernlab/src/wmsufsort.cpp0000644000175100001440000000442512774400037015263 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_msufsort.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 //' Wrapper for Michael Maniscalco's MSufSort version 2.2 algorithm #ifndef W_MSUFSORT_CPP #define W_MSUFSORT_CPP #include #include #include #include "wmsufsort.h" W_msufsort::W_msufsort() { msuffixsorter = new MSufSort(); } W_msufsort::~W_msufsort() { delete msuffixsorter; } /** * Construct Suffix Array using Michael Maniscalco's algorithm * * \param _text - (IN) The text which resultant SA corresponds to. * \param _len - (IN) The length of the text. * \param _sa - (OUT) Suffix array instance. */ ErrorCode W_msufsort::ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array){ //' A temporary copy of text SYMBOL *text_copy = new SYMBOL[len]; //' chteo: BUGBUG //' redundant? 
assert(text_copy != NULL); memcpy(text_copy, text, sizeof(SYMBOL) * len); msuffixsorter->Sort(text_copy, len); //' Code adapted from MSufSort::verifySort() for (UInt32 i = 0; i < len; i++) { UInt32 tmp = msuffixsorter->ISA(i)-1; array[tmp] = i; } //' Deallocate the memory allocated for #text_copy# delete [] text_copy; return NOERROR; } #endif kernlab/src/dspcg.c0000644000175100001440000001617212774400037013754 0ustar hornikusers#include #include extern void *xmalloc(size_t); extern double mymin(double, double); extern double mymax(double, double); /* LEVEL 1 BLAS */ /*extern double dnrm2_(int *, double *, int *);*/ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /*extern void dtrsv_(char *, char *, char *, int *, double *, int *, double *, int *);*/ /* MINPACK 2 */ extern void dprsrch(int, double *, double *, double *, double *, double *, double *); extern double dprecond(int, double *, double *); extern void dtrpcg(int, double*, double *, double, double *, double, double, double *, int *, int *); void dspcg(int n, double *x, double *xl, double *xu, double *A, double *g, double delta, double rtol, double *s, int *info) { /* c ********* c c Subroutine dspcg c c This subroutine generates a sequence of approximate minimizers c for the subproblem c c min { q(x) : xl <= x <= xu }. c c The quadratic is defined by c c q(x[0]+s) = 0.5*s'*A*s + g'*s, c c where x[0] is a base point provided by the user, A is a symmetric c positive semidefinite dense matrix, and g is a vector. c c At each stage we have an approximate minimizer x[k], and generate c a direction p[k] by solving the subproblem c c min { q(x[k]+p) : || p || <= delta, s(fixed) = 0 }, c c where fixed is the set of variables fixed at x[k], delta is the c trust region bound. c c B = A(free:free), c c where free is the set of free variables at x[k]. Given p[k], c the next minimizer x[k+1] is generated by a projected search. 
c c The starting point for this subroutine is x[1] = x[0] + s, where c x[0] is a base point and s is the Cauchy step. c c The subroutine converges when the step s satisfies c c || (g + A*s)[free] || <= rtol*|| g[free] || c c In this case the final x is an approximate minimizer in the c face defined by the free variables. c c The subroutine terminates when the trust region bound does c not allow further progress, that is, || L'*p[k] || = delta. c In this case the final x satisfies q(x) < q(x[k]). c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is the final minimizer. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A. c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g must contain the vector g. c On exit g is unchanged. c c delta is a double precision variable. c On entry delta is the trust region size. c On exit delta is unchanged. c c rtol is a double precision variable. c On entry rtol specifies the accuracy of the final minimizer. c On exit rtol is unchanged. c c s is a double precision array of dimension n. c On entry s is the Cauchy step. c On exit s contain the final step. c c info is an integer variable. c On entry info need not be specified. c On exit info is set as follows: c c info = 1 Convergence. The final step s satisfies c || (g + A*s)[free] || <= rtol*|| g[free] ||, c and the final x is an approximate minimizer c in the face defined by the free variables. c c info = 2 Termination. The trust region bound does c not allow further progress. 
*/ int i, j, nfaces, nfree, inc = 1, infotr, iters = 0, itertr; double gfnorm, gfnormf, stol = 1e-16, alpha; double one = 1, zero = 0; double *B = (double *) xmalloc(sizeof(double)*n*n); double *L = (double *) xmalloc(sizeof(double)*n*n); double *w = (double *) xmalloc(sizeof(double)*n); double *wa = (double *) xmalloc(sizeof(double)*n); double *wxl = (double *) xmalloc(sizeof(double)*n); double *wxu = (double *) xmalloc(sizeof(double)*n); int *indfree = (int *) xmalloc(sizeof(int)*n); double *gfree = (double *) xmalloc(sizeof(double)*n); /* Compute A*(x[1] - x[0]) and store in w. */ F77_CALL(dsymv)("U", &n, &one, A, &n, s, &inc, &zero, w, &inc); /* Compute the Cauchy point. */ for (j=0;j * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/DataType.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef DATATYPE_H #define DATATYPE_H // #define UInt32 unsigned int // #define UInt64 unsigned long long // #define Byte1 unsigned char // #define Byte2 unsigned short // #define Real double typedef unsigned int UInt32; // Seems that even using __extension__ g++ 4.6 will complain that // ISO C++ 1998 does not support 'long long' ... 
/* #if defined __GNUC__ && __GNUC__ >= 2 __extension__ typedef unsigned long long UInt64; #else typedef unsigned long long UInt64; #endif */ #include typedef uint64_t UInt64; typedef unsigned char Byte1; typedef unsigned short Byte2; typedef double Real; // #define SENTINEL '\n' // #define SENTINEL2 '\0' const char SENTINEL = '\n'; const char SENTINEL2 = '\0'; #ifndef UNICODE // # define SYMBOL Byte1 typedef Byte1 SYMBOL; #else // # define SYMBOL Byte2 typedef Byte2 SYMBOL; #endif #endif kernlab/src/lcp.h0000644000175100001440000000452512774400037013436 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/LCP.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef LCP_H #define LCP_H #include "datatype.h" #include "errorcode.h" #include #include #include #include #include /** * LCP array class */ class LCP { private: /// Compacted array /* std::vector _p_array; */ /* std::vector _idx_array; */ /* std::vector _val_array; */ Byte1 *_p_array; UInt32 *_idx_array; UInt32 *_val_array; UInt32 _size; bool _is_compact; UInt32 *_beg; UInt32 *_end; UInt32 *_cache; /* typedef std::vector::const_iterator const_itr; */ /* const_itr _beg; */ /* const_itr _end; */ /* const_itr _cache; */ UInt32 _dist; public: /// Original array - 4bytes //std::vector array; UInt32 *array; /// Constructors LCP(const UInt32 &size); /// Destructors virtual ~LCP(); /// Methods /// Compact 4n bytes array into (1n+8p) bytes arrays ErrorCode compact(void); /// Retrieve lcp array value // ErrorCode lcp(const UInt32 &idx, UInt32 &value); UInt32 operator[] (const UInt32& idx); friend std::ostream& operator << (std::ostream& os, LCP& lcp); }; #endif kernlab/src/stringk.c0000644000175100001440000001100612774400037014324 0ustar hornikusers#include #include #include #include #include #include #include #include #include #include double ***cache ; double kaux (const char *u, int p, const char *v, int q, int n, double lambda) { register int j; double tmp; /* case 1: if a full substring length is processed, return*/ if (n == 0) return (1.0); /* check, if the value was already computed in a previous computation */ if (cache [n] [p] [q] != -1.0) return (cache [n] [p] [q]); /* case 2: at least one substring is to short */ if (p < n || q < n) return (0.0); /* case 3: recursion */ for (j= 0, tmp = 0; j < q; j++) { if (v [j] == u [p - 1]) tmp += kaux (u, p - 1, v, j, n 
- 1, lambda) * pow (lambda, (float) (q - j + 1)); } cache [n] [p] [q] = lambda * kaux (u, p - 1, v, q, n, lambda) + tmp; return (cache [n] [p] [q]); } double seqk (const char *u, int p, const char *v, int q, int n, double lambda) { register int j; double kp; /* the simple case: (at least) one string is to short */ if (p < n || q < n) return (0.0); /* the recursion: use kaux for the t'th substrings*/ for (j = 0, kp = 0.0; j < q; j++) { if (v [j] == u [p - 1]) kp += kaux (u, p - 1, v, j, n - 1, lambda) * lambda * lambda; } return (seqk (u, p - 1, v, q, n, lambda) + kp); } /* recursively computes the subsequence kernel between s1 and s2 where subsequences of exactly length n are considered */ SEXP subsequencek(SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); int i, j, k; SEXP ret; /* allocate memory for auxiallary cache variable */ cache = (double ***) malloc (n * sizeof (double **)); for (i = 1; i < n; i++) { cache [i] = (double **) malloc (p * sizeof (double *)); for (j = 0; j < p; j++) { cache [i] [j] = (double *) malloc (q * sizeof (double)); for (k = 0; k < q; k++) cache [i] [j] [k] = -1.0; } } PROTECT(ret = allocVector(REALSXP, 1)); /* invoke recursion */ REAL(ret)[0] = seqk (u, p, v, q, n, lambda); /* free memory */ for (i = 1; i < n; i++) { for (j = 0; j < p; j++) free (cache [i] [j]); free (cache [i]); } free (cache); UNPROTECT(1); return (ret); } /* computes the substring kernel between s1 and s2 where substrings up to length n are considered */ SEXP fullsubstringk (SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); register int i, j, k; double ret, tmp; SEXP retk; /* computes the 
substring kernel */ for (ret = 0.0, i = 0; i < p; i++) { for (j = 0; j < q; j++) if (u [i] == v [j]) { for (k = 0, tmp = lambda * lambda; /* starting condition */ (i + k < p) && (j + k < q) && (u [i + k] == v [j + k]) && (k < n); /* stop conditions */ k++, tmp *= (lambda * lambda)) /* update per iteration */ ret += tmp; } } PROTECT(retk = allocVector(REALSXP, 1)); REAL(retk)[0] = ret; UNPROTECT(1); return (retk); } /* computes the substring kernel between s1 and s2 where substrings of exactly length n are considered */ SEXP substringk (SEXP s1, SEXP s2, SEXP l1, SEXP l2, SEXP nr, SEXP lambdar) { const char *u = CHAR(STRING_ELT(s1, 0)); const char *v = CHAR(STRING_ELT(s2, 0)); int p = *INTEGER(l1); int q = *INTEGER(l2); int n = *INTEGER(nr); double lambda = *REAL(lambdar); SEXP retk; register int i, j, k; double ret, tmp; /* computes the substring kernel */ for (ret = 0.0, i = 0; i < p; i++) { for (j = 0; j < q; j++) { for (k = 0, tmp = lambda * lambda; /* starting condition */ (i + k < p) && (j + k < q) && (u [i + k] == v [j + k]) && (k < n); /* stop conditions */ k++, tmp *= (lambda * lambda)); /* update per iteration */ if (k == n) ret += tmp; /* update features in case of full match */ } } PROTECT(retk = allocVector(REALSXP, 1)); REAL(retk)[0] = ret; UNPROTECT(1); return (retk); } kernlab/src/dtrqsol.c0000644000175100001440000000333612774400037014342 0ustar hornikusers#include #include extern double mymax(double, double); /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *);*/ void dtrqsol(int n, double *x, double *p, double delta, double *sigma) { /* c ********** c c Subroutine dtrqsol c c This subroutine computes the largest (non-negative) solution c of the quadratic trust region equation c c ||x + sigma*p|| = delta. c c The code is only guaranteed to produce a non-negative solution c if ||x|| <= delta, and p != 0. If the trust region equation has c no solution, sigma = 0. c c parameters: c c n is an integer variable. 
c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x must contain the vector x. c On exit x is unchanged. c c p is a double precision array of dimension n. c On entry p must contain the vector p. c On exit p is unchanged. c c delta is a double precision variable. c On entry delta specifies the scalar delta. c On exit delta is unchanged. c c sigma is a double precision variable. c On entry sigma need not be specified. c On exit sigma contains the non-negative solution. c c ********** */ int inc = 1; double dsq = delta*delta, ptp, ptx, rad, xtx; ptx = F77_CALL(ddot)(&n, p, &inc, x, &inc); ptp = F77_CALL(ddot)(&n, p, &inc, p, &inc); xtx = F77_CALL(ddot)(&n, x, &inc, x, &inc); /* Guard against abnormal cases. */ rad = ptx*ptx + ptp*(dsq - xtx); rad = sqrt(mymax(rad, 0)); if (ptx > 0) *sigma = (dsq - xtx)/(ptx + rad); else if (rad > 0) *sigma = (rad - ptx)/ptp; else *sigma = 0; } kernlab/src/esa.cpp0000644000175100001440000007370512774400037013771 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ESA.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef ESA_CPP #define ESA_CPP #include #include #include #include #include #include #include #include #include "esa.h" #ifdef SSARRAY #ifdef __cplusplus extern "C" { #endif #include "sarray.h" #ifdef __cplusplus } #endif #else #include "wmsufsort.h" #endif #include "wkasailcp.h" #define MIN(x,y) (((x) < (y)) ? (x):(y)) ESA::ESA(const UInt32 & size_, SYMBOL *text_, int verb): _verb(verb), size(size_), text(text_), suftab(0), lcptab(size_), childtab(size_, lcptab), suflink(0), bcktab_depth(0), bcktab_size(0), bcktab_val(0), bcktab_key4(0), coef4(0), bcktab_key8(0), coef8(0) { I_SAFactory* sa_fac = 0; I_LCPFactory* lcp_fac = 0; //' input validation assert(size > 0); // if(text[size-1] != SENTINEL) // text = (SYMBOL*)(std::string(text)+SENTINEL).c_str(); assert(text[size-1] == SENTINEL); // CW Sanity test for (unsigned int i = 0; i < size-1 ; i++) { assert(text[i] != 0); } // for (int i = 0; i < size ; i++) { // printf("%c : %i\n", text[i], (int) text[i]); // } #if SSARRAY suftab = new int[size]; for (unsigned int i = 0; i < size - 1 ; i++) { suftab[i] = text[i]; } suftab[size-1] = 0; ssarray((int*) suftab); #else //' Construct Suffix Array if(!sa_fac){ sa_fac = new W_msufsort(); } // CW Try // size = 10; // text[size-1] = 0; suftab = new UInt32[size]; sa_fac->ConstructSA(text, size, suftab); if(sa_fac) { delete sa_fac; sa_fac = NULL; } #endif //' Compute LCP array if(!lcp_fac){ lcp_fac = new W_kasai_lcp(); } // CW lcp_fac->ComputeLCP(text, size, suftab, lcptab); lcp_fac->ComputeLCP(text, size, (UInt32 *) suftab, lcptab); if(lcp_fac) { delete lcp_fac; lcp_fac = NULL; } //' Compress LCP array lcptab.compact(); //' Construct Child Table ConstructChildTable(); 
#ifdef SLINK //' Construct Suffix link table //' The suffix link interval, (l-1)-[p..q] of interval l-[i..j] can be retrieved //' by following method: //' Let k be the firstlIndex of l-[i..j], p = suflink[2*k], q = suflink[2*k+1]. suflink = new UInt32[2 * size + 2]; //' extra space for extra sentinel char! memset(suflink,0,sizeof(UInt32)*(2 * size +2)); ConstructSuflink(); #else //' Threshold for constructing bucket table if(size >= 1024) ConstructBcktab(); //' Otherwise, just do plain binary search to search for suffix link interval #endif } ESA::~ESA() { //if(text) { delete text; text = 0;} if(suflink) { delete [] suflink; suflink=0; } if(suftab) { delete [] suftab; suftab=0; } if(bcktab_val) { delete [] bcktab_val; bcktab_val=0; } if(bcktab_key4) { delete [] bcktab_key4; bcktab_key4=0;} if(coef4) { delete [] coef4; coef4 = 0; } if(bcktab_key8) { delete [] bcktab_key8; bcktab_key8=0;} if(coef8) { delete [] coef8; coef8 = 0; } } /// The lcp-interval structure. Used in ESA::ConstructChildTable() class lcp_interval { public: UInt32 lcp; UInt32 lb; UInt32 rb; std::vector child; /// Constructors lcp_interval(){} lcp_interval(const UInt32 &lcp_, const UInt32 lb_, const UInt32 &rb_, lcp_interval *itv) { lcp = lcp_; lb = lb_; rb = rb_; if(itv) child.push_back(itv); } /// Destructor ~lcp_interval(){ for(UInt32 i=0; i< child.size(); i++) delete child[i]; child.clear(); } }; /** * Construct 3-fields-merged child table. */ ErrorCode ESA::ConstructChildTable(){ // Input validation assert(text); assert(suftab); //' stack for lcp-intervals std::stack lit; //' Refer to: Abo05::Algorithm 4.5.2. lcp_interval *lastInterval = 0; lcp_interval *new_itv = 0; lit.push(new lcp_interval(0, 0, 0, 0)); //' root interval // Variables to handle 0-idx bool first = true; UInt32 prev_0idx = 0; UInt32 first0idx = 0; // Loop thru and process each index. for(UInt32 idx = 1; idx < size + 1; idx++) { UInt32 tmp_lb = idx - 1; //svnvish: BUGBUG // We just assume that the lcp of size + 1 is zero. 
// This simplifies the logic of the code UInt32 lcp_idx = 0; if(idx < size){ lcp_idx = lcptab[idx]; } while (lcp_idx < lit.top()->lcp){ lastInterval = lit.top(); lit.pop(); lastInterval->rb = idx - 1; // svnvish: Begin process UInt32 n_child = lastInterval->child.size(); UInt32 i = lastInterval->lb; UInt32 j = lastInterval->rb; // idx -1 ? //Step 1: Set childtab[i].down or childtab[j+1].up to first l-index UInt32 first_l_index = i+1; if(n_child && (lastInterval->child[0]->lb == i)) first_l_index = lastInterval->child[0]->rb+1; //svnvish: BUGBUG // ec = childtab.Set_Up(lastInterval->rb+1, first_l_index); // ec = childtab.Set_Down(lastInterval->lb, first_l_index); childtab[lastInterval->rb] = first_l_index; childtab[lastInterval->lb] = first_l_index; // Now we need to set the NextlIndex fields The main problem here // is that the child intervals might not be contiguous UInt32 ptr = i+1; UInt32 child_count = 0; while(ptr < j){ UInt32 first = j; UInt32 last = j; // Get next child to process if(n_child - child_count){ first = lastInterval->child[child_count]->lb; last = lastInterval->child[child_count]->rb; child_count++; } // Eat away singleton intervals while(ptr < first){ childtab[ptr] = ptr + 1; ptr++; } // Handle an child interval and make appropriate entries in // child table ptr = last + 1; if(last < j){ childtab[first] = ptr; } } //' Free lcp_intervals for(UInt32 child_cnt = 0; child_cnt < n_child; child_cnt++) { delete lastInterval->child[child_cnt]; lastInterval->child[child_cnt] = 0; } // svnvish: End process tmp_lb = lastInterval->lb; if(lcp_idx <= lit.top()->lcp) { lit.top()->child.push_back(lastInterval); lastInterval = 0; } }// while if(lcp_idx > lit.top()->lcp) { new_itv = new lcp_interval(lcp_idx, tmp_lb,0, lastInterval); lit.push(new_itv); new_itv = 0; lastInterval = 0; } // Handle the 0-indices. 
// 0-indices := { i | LCP[i]=0, \forall i = 0,...,n-1} if((idx < size) && (lcp_idx == 0)) { // svnvish: BUGBUG // ec = childtab.Set_NextlIndex(prev_0_index,k); childtab[prev_0idx] = idx; prev_0idx = idx; // Handle first 0-index specially // Store in childtab[(size-1)+1].up if(first){ // svnvish: BUGBUG // ec = childtab.Set_Up(size,k); CHECKERROR(ec); first0idx = idx; first = false; } } } // for childtab[size-1] = first0idx; // svnvish: All remaining elements in the stack are ignored. // chteo: Free all remaining elements in the stack. while(!lit.empty()) { lastInterval = lit.top(); delete lastInterval; lit.pop(); } assert(lit.empty()); return NOERROR; } #ifdef SLINK /** * Get suffix link interval, [sl_i..sl_j], of a given interval, [i..j]. * * \param i - (IN) Left bound of interval [i..j] * \param j - (IN) Right bound of interval [i..j] * \param sl_i - (OUT) Left bound of suffix link interval [sl_i..sl_j] * \param sl_j - (OUT) Right bound of suffix link interval [sl_i..sl_j] */ ErrorCode ESA::GetSuflink(const UInt32 &i, const UInt32 &j, UInt32 &sl_i, UInt32 &sl_j) { //' Input validation assert(i=0 && j= (j-i)); return NOERROR; } #elif defined(LSEARCH) /** * "Linear" Search version of GetSuflink. Suffix link intervals are not stored * explicitly but searched when needed. * * Note: Slow!!! especially in the case of long and similar texts. */ ErrorCode ESA::GetSuflink(const UInt32 &i, const UInt32 &j, UInt32 &sl_i, UInt32 &sl_j) { //' Variables SYMBOL ch; UInt32 lcp=0; UInt32 final_lcp = 0; UInt32 lb = 0, rb = size-1; //' root interval //' First suflink interval char := Second char of original interval ch = text[suftab[i]+1]; //' lcp of suffix link interval := lcp of original interval - 1 final_lcp = 0; GetLcp(i,j,final_lcp); final_lcp = (final_lcp > 0) ? 
final_lcp-1 : 0; //' Searching for suffix link interval sl_i = lb; sl_j = rb; while(lcp < final_lcp) { GetIntervalByChar(lb,rb,ch,lcp,sl_i, sl_j); GetLcp(sl_i, sl_j, lcp); lb = sl_i; rb = sl_j; ch = text[suftab[i]+lcp+1]; } assert(sl_j > sl_i); assert((sl_j-sl_i) >= (j-i)); return NOERROR; } #else /** * Construct bucket table. * * \param alpahabet_size - Size of alphabet set */ ErrorCode ESA::ConstructBcktab(const UInt32 &alphabet_size) { UInt32 MAX_DEPTH = 8; //' when alphabet_size is 256 UInt32 sizeof_uint4 = 4; //' 4 bytes integer UInt32 sizeof_uint8 = 8; //' 8 bytes integer UInt32 sizeof_key = sizeof_uint8; //' Step 1: Determine the bcktab_depth for(bcktab_depth = MAX_DEPTH; bcktab_depth >0; bcktab_depth--) { bcktab_size = 0; for(UInt32 i=0; i < size; i++) if(lcptab[i] < bcktab_depth) bcktab_size++; if(bcktab_depth <= 4) sizeof_key = sizeof_uint4; if(bcktab_size <= size/(sizeof_key + sizeof_uint4)) break; } //' Step 2: Allocate memory for bcktab_key and bcktab_val. //' Step 3: Precompute coefficients for computing hash values of prefixes later. //' Step 4: Collect the prefixes with lcp <= bcktab_depth and //' convert them into hash value. 
if(sizeof_key == sizeof_uint4) { //' (2) bcktab_key4 = new UInt32[bcktab_size]; bcktab_val = new UInt32[bcktab_size]; assert(bcktab_key4 && bcktab_val); //' (3) coef4 = new UInt32[4]; coef4[0] = 1; for(UInt32 i=1; i < 4; i++) coef4[i] = coef4[i-1]*alphabet_size; //' (4) for(UInt32 i=0, k=0; i < size; i++) { if(lcptab[i] < bcktab_depth) { UInt32 c = MIN((size-suftab[i]), bcktab_depth); hash_value4 = 0; for(UInt32 j=0; j < c; j++) hash_value4 += text[suftab[i]+j]*coef4[bcktab_depth-1-j]; bcktab_key4[k] = hash_value4; bcktab_val[k] = i; k++; } } } else { //' (2) bcktab_key8 = new UInt64[bcktab_size]; bcktab_val = new UInt32[bcktab_size]; assert(bcktab_key8 && bcktab_val); //' (3) coef8 = new UInt64[9]; coef8[0] = 1; for(UInt32 i=1; i < 9; i++) coef8[i] = coef8[i-1]*alphabet_size; //' (4) for(UInt32 i=0, k=0; i < size; i++) { if(lcptab[i] < bcktab_depth) { UInt32 c = MIN( (size-suftab[i]), bcktab_depth); hash_value8 = 0; for(UInt32 j=0; j < c; j++) hash_value8 += text[suftab[i]+j]*coef8[bcktab_depth-1-j]; bcktab_key8[k] = hash_value8; bcktab_val[k] = i; k++; } } } //' check if bcktab in ascending order // for(UInt32 ii=1; ii= 1); //' the interval [i..j] must has at least 2 suffixes. //' Variables UInt32 left=0, mid=0, right=0, tmp_right=0; UInt32 llcp=0, mlcp=0, rlcp=0; UInt32 orig_lcp = 0; UInt32 c = 0; UInt32 offset = 0; GetLcp(i, j, orig_lcp); if(orig_lcp <= 1) { sl_i = 0; sl_j = size-1; return NOERROR; } //' Default left = 0; right = size-1; //' Make use of bcktab here. Maximum lcp value is always 1 less than bcktab_depth. //' This is because including lcp values equal to bcktab_depth will violate //' the constraint of prefix uniqueness. 
offset = MIN(orig_lcp-1, bcktab_depth); assert(offset>=0); if(bcktab_key4) { hash_value4 = 0; for(UInt32 cnt=0; cnt < offset; cnt++) hash_value4 += coef4[bcktab_depth-1-cnt]*text[suftab[i]+1+cnt]; //' lower bound return the exact position of of target, if found one UInt32 *p = std::lower_bound(bcktab_key4, bcktab_key4+bcktab_size, hash_value4); left = bcktab_val[p - bcktab_key4]; //' this hash value is used to find the right bound of target interval hash_value4 += coef4[bcktab_depth-offset]; //' upper bound return the smallest value > than target. UInt32 *q = std::upper_bound(p, bcktab_key4+bcktab_size, hash_value4); if(q == bcktab_key4+bcktab_size) right = size-1; else right = bcktab_val[q - bcktab_key4] - 1; } else if(bcktab_key8) { hash_value8 = 0; for(UInt32 cnt=0; cnt < offset; cnt++) hash_value8 += coef8[bcktab_depth-1-cnt]*text[suftab[i]+1+cnt]; //' lower bound return the exact position of of target, if found one UInt64 *p = std::lower_bound(bcktab_key8, bcktab_key8+bcktab_size, hash_value8); left = bcktab_val[p - bcktab_key8]; //' this hash value is used to find the right bound of target interval hash_value8 += coef8[bcktab_depth-offset]; //' upper bound return the smallest value > than target. UInt64 *q = std::upper_bound(p, bcktab_key8+bcktab_size, hash_value8); if(q == bcktab_key8+bcktab_size) right = size-1; else right = bcktab_val[q - bcktab_key8] - 1; } tmp_right = right; assert(right <= size-1); assert(right > left); offset = 0; //' Compute LEFT boundary of suflink interval Compare(left, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, llcp); llcp += offset; if(llcp < orig_lcp-1) { Compare(right, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, rlcp); rlcp += offset; c = MIN(llcp,rlcp); while(right-left > 1){ mid = (left + right)/2; Compare(mid, c, &text[suftab[i]+1+c], orig_lcp-1-c, mlcp); mlcp += c; //' if target not found yet... 
if(mlcp < orig_lcp-1) { if(text[suftab[mid]+mlcp] < text[suftab[i]+mlcp+1]) { left = mid; llcp = mlcp; } else { right = mid; rlcp = mlcp; } } else { //' mlcp == orig_lcp-1 assert(mlcp == orig_lcp-1); //' target found, but want to make sure it is the LEFTmost... right = mid; rlcp = mlcp; } c = MIN(llcp, rlcp); } sl_i = right; llcp = rlcp; } else { sl_i = left; } //' Compute RIGHT boundary of suflink interval right = tmp_right; left = sl_i; Compare(right, offset, &text[suftab[i]+1+offset], orig_lcp-1-offset, rlcp); rlcp += offset; if(rlcp < orig_lcp-1) { c = MIN(llcp,rlcp); while(right-left > 1){ mid = (left + right)/2; Compare(mid, c, &text[suftab[i]+1+c], orig_lcp-1-c, mlcp); mlcp += c; //' if target not found yet... if(mlcp < orig_lcp-1) { if(text[suftab[mid]+mlcp] < text[suftab[i]+mlcp+1]) { //' target is on the right half left = mid; llcp = mlcp; } else { //' target is on the left half right = mid; rlcp = mlcp; } } else { //' mlcp == orig_lcp-1 assert(mlcp == orig_lcp-1); //' target found, but want to make sure it is the RIGHTmost... left = mid; llcp = mlcp; } c = MIN(llcp, rlcp); } sl_j = left; } else { sl_j = right; } assert(sl_i < sl_j); return NOERROR; } #endif /** * Find suffix link interval, [p..q], for a child interval, [c_i..c_j], given its * parent interval [p_i..p_j]. * * Pre : 1. Suffix link interval for parent interval has been computed. * 2. [child_i..child_j] is not a singleton interval. * * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param child_i - (IN) Left bound of child interval. * \param child_j - (IN) Right bound of child interval. 
* \param sl_i - (OUT) Left bound of suffix link interval of child interval * \param sl_j - (OUT) Right bound of suffix link interval of child interval */ ErrorCode ESA::FindSuflink(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &child_i, const UInt32 &child_j, UInt32 &sl_i, UInt32 &sl_j) { assert(child_i != child_j); //' Variables SYMBOL ch; UInt32 tmp_i = 0; UInt32 tmp_j = 0; UInt32 lcp_child = 0; UInt32 lcp_parent = 0; UInt32 lcp_sl = 0; //' Step 1: Get suffix link interval of parent interval and its lcp value. //' 2: Get lcp values of parent and child intervals. //' Shortcut! if(parent_i ==0 && parent_j == size-1) { //' this is root interval //' (1) sl_i = 0; sl_j = size-1; lcp_sl = 0; //' (2) lcp_parent = 0; GetLcp(child_i,child_j,lcp_child); assert(lcp_child > 0); } else { //' (1) GetSuflink(parent_i,parent_j,sl_i,sl_j); GetLcp(sl_i, sl_j, lcp_sl); //' (2) GetLcp(parent_i,parent_j,lcp_parent); GetLcp(child_i,child_j,lcp_child); assert(lcp_child > 0); } //' Traversing down the subtree of [sl_i..sl_j] and looking for //' the suffix link interval of child interval. while (lcp_sl < lcp_child-1) { //' The character that we want to look for in suflink interval. ch = text[suftab[child_i]+lcp_sl+1]; tmp_i = sl_i; tmp_j = sl_j; GetIntervalByChar(tmp_i, tmp_j, ch, lcp_sl, sl_i, sl_j); assert(sl_i > q; //' The interval queue std::pair interval; //' Step 0: Push root onto queue. And define itself as its suflink. q.push(std::make_pair((unsigned int)0,size-1)); UInt32 idx = 0; childtab.l_idx(0,size-1,idx); suflink[idx+idx] = 0; //' left bound of suffix link interval suflink[idx+idx+1] = size-1; //' right bound of suffix link interval //' Step 1: Breadth first traversal. while (!q.empty()) { //' Step 1.1: Pop interval from queue. interval = q.front(); q.pop(); //' Step 1.2: For each non-singleton child-intervals, [p..q], "find" its //' suffix link interval and then "push" it onto the interval queue. 
UInt32 i=0,j=0, sl_i=0, sl_j=0, start_idx=interval.first; do { //' Notes: interval.first := left bound of suffix link interval //' interval.second := right bound of suffix link interval assert(interval.first>=0 && interval.second < size); GetIntervalByIndex(interval.first, interval.second, start_idx, i, j); if(j > i) { //' [i..j] is non-singleton interval FindSuflink(interval.first, interval.second, i,j, sl_i, sl_j); assert(!(sl_i == i && sl_j == j)); //' Store suflink of [i..j] UInt32 idx=0; childtab.l_idx(i, j, idx); suflink[idx+idx] = sl_i; suflink[idx+idx+1] = sl_j; //' Push suflink interval onto queue q.push(std::make_pair(i,j)); } start_idx = j+1; //' prepare to get next child interval }while(start_idx < interval.second); } return NOERROR; } /** * Get all child-intervals, including singletons. * Store all non-singleton intervals onto #q#, where interval is defined as * (i,j) where i and j are left and right bounds. * * \param lb - (IN) Left bound of current interval. * \param rb - (IN) Right bound of current interval. * \param q - (OUT) Storage for intervals. */ ErrorCode ESA::GetChildIntervals(const UInt32 &lb, const UInt32 &rb, std::vector > &q) { //' Variables UInt32 k=0; //' general index UInt32 i=0,j=0; //' for interval [i..j] //' Input validation assert(rb-lb >= 1); k = lb; do { assert(lb>=0 && rb 0) { if(j > i) { // chteo: saved 1 operation ;) [260906] //' Non-singleton interval q.push_back(std::make_pair(i,j)); } k = j+1; }while(k < rb); return NOERROR; } /** * Given an l-interval, l-[i..j] and a starting index, idx \in [i..j], * return the child-interval, k-[p..q], of l-[i..j] where p == idx. * * Reference: Abo05::algorithm 4.6.4 * * Pre: #start_idx# is a l-index or equal to parent_i. * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param start_idx - (IN) Predefined left bound of child interval. * \param child_i - (OUT) Left bound of child interval. 
* \param child_j - (OUT) Right bound of child interval. * * Time complexity: O(|alphabet set|) */ ErrorCode ESA::GetIntervalByIndex(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &start_idx, UInt32 &child_i, UInt32 &child_j) { //' Variables UInt32 lcp_child_i = 0; UInt32 lcp_child_j = 0; //' Input validation assert( (parent_i < parent_j) && (parent_i >= 0) && (parent_j < size) && (start_idx >= parent_i) && (start_idx < parent_j)); child_i = start_idx; //' #start_idx# is not and l-index, i.e. #start_idx == #parent_i# if(child_i == parent_i) { childtab.l_idx(parent_i,parent_j,child_j); child_j--; return NOERROR; } //' #start_idx# is a l-index // svnvish:BUGBUG child_j = childtab[child_i]; lcp_child_i = lcptab[child_i]; lcp_child_j = lcptab[child_j]; if(child_i < child_j && lcp_child_i == lcp_child_j) child_j--; else { //' child_i is the left bound of last child interval child_j = parent_j; } return NOERROR; } /** * Given an l-interval, l-[i..j] and a starting character, ch \in alphabet set, * return the child-interval, k-[p..q], of l-[i..j] such that text[sa[p]+depth] == ch. * * Reference: Abo05::algorithm 4.6.4 * * Post: Return [i..j]. If interval was found, i<=j, otherwise, i>j. * * \param parent_i - (IN) Left bound of parent interval. * \param parent_j - (IN) Right bound of parent interval. * \param ch - (IN) Starting character of left bound (suffix) of child interval. * \param depth - (IN) The position where #ch# is located in #text# * i.e. ch = text[suftab[parent_i]+depth]. * \param child_i - (OUT) Left bound of child interval. * \param child_j - (OUT) Right bound of child interval. 
* * Time complexity: O(|alphabet set|) */ ErrorCode ESA::GetIntervalByChar(const UInt32 &parent_i, const UInt32 &parent_j, const SYMBOL &ch, const UInt32 &depth, UInt32 &child_i, UInt32 &child_j) { //' Input validation assert(parent_i < parent_j && parent_i >= 0 && parent_j < size); //' Variables UInt32 idx = 0; UInt32 idx_next = 0; UInt32 lcp_idx = 0; UInt32 lcp_idx_next = 0; UInt32 lcp = 0; //' #depth# is actually equal to the following statement! //ec = GetLcp(parent_i, parent_j, lcp); CHECKERROR(ec); lcp = depth; //' Step 1: Check if #ch# falls in the initial range. if(text[suftab[parent_i]+lcp] > ch || text[suftab[parent_j]+lcp] < ch) { //' No child interval starts with #ch#, so, return undefined interval. child_i = 1; child_j = 0; return NOERROR; } //' Step 2: #ch# is in the initial range, but not necessarily exists in the range. //' Step 2.1: Get first l-index childtab.l_idx(parent_i, parent_j, idx); assert(idx > parent_i && idx <= parent_j); if(text[suftab[idx-1]+lcp] == ch) { child_i = parent_i; child_j = idx-1; return NOERROR; } //' Step 3: Look for child interval which starts with #ch# // svnvish: BUGBUG //ec = childtab.NextlIndex(idx, idx_next); CHECKERROR(ec); idx_next = childtab[idx]; lcp_idx = lcptab[idx]; lcp_idx_next = lcptab[idx_next]; while(idx < idx_next && lcp_idx == lcp_idx_next && text[suftab[idx]+lcp] < ch) { idx = idx_next; // svnvish: BUGBUG // ec = childtab.NextlIndex(idx, idx_next); CHECKERROR(ec); idx_next = childtab[idx]; lcp_idx = lcptab[idx]; lcp_idx_next = lcptab[idx_next]; } if(text[suftab[idx]+lcp] == ch) { child_i = idx; if(idx < idx_next && lcp_idx == lcp_idx_next) child_j = idx_next - 1; else child_j = parent_j; return NOERROR; } //' Child interval starts with #ch# not found child_i = 1; child_j = 0; return NOERROR; } /** * Return the lcp value of a given interval, l-[i..j]. * * Pre: [i..j] \subseteq [0..size] * * \param i - (IN) Left bound of the interval. * \param j - (IN) Right bound of the interval. 
* \param val - (OUT) The lcp value of the interval. */ ErrorCode ESA::GetLcp(const UInt32 &i, const UInt32 &j, UInt32 &val) { //' Input validation assert(i < j && i >= 0 && j < size); //' Variables UInt32 up, down; //' 0-[0..size-1]. This is a shortcut! if(i == 0 && j == size) { val = 0; } else { childtab.up(j+1,up); if( (i < up) && (up <= j)) { val = lcptab[up]; } else { childtab.down(i,down); val = lcptab[down]; } } return NOERROR; } /** * Compare #pattern# string to text[suftab[#idx#]..size] and return the * length of the substring matched. * * \param idx - (IN) The index of esa. * \param depth - (IN) The start position of matching mechanism. * \param pattern - (IN) The pattern string. * \param p_len - (IN) The length of #pattern#. * \param matched_len - (OUT) The length of matched substring. */ ErrorCode ESA::Compare(const UInt32 &idx, const UInt32 &depth, SYMBOL *pattern, const UInt32 &p_len, UInt32 &matched_len) { //' Variables UInt32 min=0; min = (p_len < size-(suftab[idx]+depth)) ? p_len : size-(suftab[idx]+depth); matched_len = 0; for(UInt32 k=0; k < min; k++) { if(text[suftab[idx]+depth+k] == pattern[k]) matched_len++; else break; } return NOERROR; } /** * Find the longest matching of text and pattern. * * Note: undefinded interval := [i..j] where i>j * * Post: Return "floor" and "ceil" of longest substring of pattern that exists in text. * Otherwise, that is, no substring of pattern ever exists in text, * return the starting interval, [i..j]. * * \param i - (IN) Left bound of the starting interval. * \param j - (IN) Right bound of the starting interval. * \param offset - (IN) The number of characters between the head of suffix and the * position to start matching. * \param pattern - (IN) The pattern string to match to esa. * \param p_len - (IN) The length of #pattern# * \param lb - (OUT) The left bound of the interval containing * longest matched suffix. * \param rb - (OUT) The right bound of the interval containing * longest matched suffix. 
* \param matched_len - (OUT) The length of the longest matched suffix. * \param floor_lb - (OUT) Left bound of floor interval of [lb..rb]. * \param floor_rb - (OUT) Right bound of floor interval of [lb..rb]. * \param floor_len - (OUT) The lcp value of floor interval. */ ErrorCode ESA::ExactSuffixMatch(const UInt32 &i, const UInt32 &j, const UInt32 &offset, SYMBOL *pattern, const UInt32 p_len, UInt32 &lb, UInt32 &rb, UInt32 &matched_len, UInt32 &floor_lb, UInt32 &floor_rb, UInt32 &floor_len) { //' Input validation assert(i != j); //' Variables UInt32 min, lcp; bool queryFound = true; //' Initial setting. floor_lb = lb = i; floor_rb = rb = j; matched_len = offset; //' Step 1: Get lcp of floor/starting interval. GetLcp(floor_lb, floor_rb, lcp); floor_len = lcp; //' Step 2: Skipping #offset# characters while(lcp < matched_len) { floor_lb = lb; floor_rb = rb; floor_len = lcp; GetIntervalByChar(floor_lb, floor_rb, pattern[lcp], lcp, lb, rb); // printf("lb, rb : %i, %i\n", lb, rb); assert(lb <= rb); if(lb == rb) break; GetLcp(lb, rb, lcp); } //' Step 3: Continue matching from the point (either an interval or singleton) we stopped. while( (lb<=rb) && queryFound ) { if(lb != rb) { GetLcp(lb, rb, lcp); min = (lcp < p_len) ? lcp : p_len; while(matched_len < min) { queryFound = (text[suftab[lb]+matched_len] == pattern[matched_len]); if(queryFound) matched_len++; else return NOERROR; } assert(matched_len == min); //' Full pattern found! if(matched_len == p_len) return NOERROR; floor_lb = lb; floor_rb = rb; floor_len = lcp; GetIntervalByChar(floor_lb, floor_rb,pattern[matched_len],matched_len,lb,rb); }else { //' lb == rb, i.e. singleton interval. min = (p_len < size-suftab[lb]) ? 
p_len : size-suftab[lb]; while(matched_len rb) { lb = floor_lb; rb = floor_rb; } return NOERROR; } #endif kernlab/src/esa.h0000644000175100001440000001062112774400037013422 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ESA.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef ESA_H #define ESA_H #include "datatype.h" #include "errorcode.h" #include "lcp.h" #include "ctable.h" #include "ilcpfactory.h" #include "isafactory.h" #include #include //#define SLINK // #define SSARRAY // does not yeet work correctly, CW class ESA { private: int _verb; public: UInt32 size; //' The length of #text# SYMBOL *text; //' Text corresponds to SA #ifdef SSARRAY int *suftab; //' Suffix Array #else UInt32 *suftab; //' Suffix Array #endif LCP lcptab; //' LCP array ChildTable childtab; //' Child table (fields merged) UInt32 *suflink; //' Suffix link table. 
Two fields: l,r //' --- for bucket table --- UInt32 bcktab_depth; //' Number of char defining each bucket UInt32 bcktab_size; //' size of bucket table UInt32 *bcktab_val; //' value column of bucket table UInt32 *bcktab_key4; //' 4-bytes key column of Bucket table UInt32 *coef4; UInt32 hash_value4; UInt64 *bcktab_key8; //' 8-bytes key column of Bucket table UInt64 *coef8; UInt64 hash_value8; //' --- /// Constructors ESA(const UInt32 & size_, SYMBOL *text_, int verb=INFO); /// Destructor virtual ~ESA(); /// Construct child table ErrorCode ConstructChildTable(); /// Get suffix link interval ErrorCode GetSuflink(const UInt32 &i, const UInt32 &j, UInt32 &sl_i, UInt32 &sl_j); /// Find the suffix link ErrorCode FindSuflink(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &child_i, const UInt32 &child_j, UInt32 &sl_i, UInt32 &sl_j); /// Construct suffix link table ErrorCode ConstructSuflink(); /// Construct bucket table ErrorCode ConstructBcktab(const UInt32 &alphabet_size=256); /// Get all non-singleton child-intervals ErrorCode GetChildIntervals(const UInt32 &lb, const UInt32 &rb, std::vector > &q); /// Get intervals by index ErrorCode GetIntervalByIndex(const UInt32 &parent_i, const UInt32 &parent_j, const UInt32 &start_idx, UInt32 &child_i, UInt32 &child_j); /// Get intervals by character ErrorCode GetIntervalByChar(const UInt32 &parent_i, const UInt32 &parent_j, const SYMBOL &start_ch, const UInt32 &depth, UInt32 &child_i, UInt32 &child_j); /// Get lcp value ErrorCode GetLcp(const UInt32 &i, const UInt32 &j, UInt32 &val); /// Compare pattern to text[suftab[idx]..length]. ErrorCode Compare(const UInt32 &idx, const UInt32 &depth, SYMBOL *pattern, const UInt32 &p_len, UInt32 &matched_len); /// Find longest substring of pattern in enhanced suffix array. 
ErrorCode Match(const UInt32 &i, const UInt32 &j, SYMBOL *pattern, const UInt32 p_len, UInt32 &lb, UInt32 &rb, UInt32 &matched_len); /// Similar to Match() but returns also floor interval of [lb..rb] ErrorCode ExactSuffixMatch(const UInt32 &i, const UInt32 &j, const UInt32 &offset, SYMBOL *pattern, const UInt32 p_len, UInt32 &lb, UInt32 &rb, UInt32 &matched_len, UInt32 &floor_lb, UInt32 &floor_rb, UInt32 &floor_len); }; #endif kernlab/src/cweight.cpp0000644000175100001440000000427412774400037014646 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ConstantWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 12 Oct 2006 #ifndef CWEIGHT_CPP #define CWEIGHT_CPP #include "cweight.h" #include /** * Constant weight function. Computes number of common substrings. Every * matched substring is of same weight (i.e. 1) * W(y,t) := tau - gamma * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). 
* \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode ConstantWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. weight = (x_len - floor_len); // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " weight : " << weight << std::endl; return NOERROR; } #endif kernlab/src/errorcode.h0000644000175100001440000000374312774400037014645 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ErrorCode.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef _ERRORCODE_H_ #define _ERRORCODE_H_ #include "datatype.h" #include // Verbosity level enum verbosity {QUIET, INFO, DEBUG1}; #define ErrorCode UInt32 /** * for general use */ #define NOERROR 0 #define GENERAL_ERROR 1 #define MEM_ALLOC_FAILED 2 #define INVALID_PARAM 3 #define ARRAY_EMPTY 4 #define OPERATION_FAILED 5 /** * SuffixArray */ #define MATCH_NOT_FOUND 101 #define PARTIAL_MATCH 102 /** * LCP */ #define LCP_COMPACT_FAILED 201 #define CHECKERROR(i) { \ if((i) != NOERROR) { \ exit(EXIT_FAILURE); \ } \ } // #define MESSAGE(msg) { std::cout<<(msg)< #include #include extern double mymin(double, double); extern double mymax(double, double); extern void *xmalloc(size_t); /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *);*/ /*extern int daxpy_(int *, double *, double *, int *, double *, int *);*/ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /* MINPACK 2 */ extern void dbreakpt(int, double *, double *, double *, double *, int *, double *, double *); extern void dgpstep(int, double *, double *, double *, double, double *, double *); void dprsrch(int n, double *x, double *xl, double *xu, double *A, double *g, double *w) { /* c ********** c c Subroutine dprsrch c c This subroutine uses a projected search to compute a step c that satisfies a sufficient decrease condition for the quadratic c c q(s) = 0.5*s'*A*s + g'*s, c c where A is a symmetric matrix and g is a vector. Given the c parameter alpha, the step is c c s[alpha] = P[x + alpha*w] - x, c c where w is the search direction and P the projection onto the c n-dimensional interval [xl,xu]. 
The final step s = s[alpha] c satisfies the sufficient decrease condition c c q(s) <= mu_0*(g'*s), c c where mu_0 is a constant in (0,1). c c The search direction w must be a descent direction for the c quadratic q at x such that the quadratic is decreasing c in the ray x + alpha*w for 0 <= alpha <= 1. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is set to the final point P[x + alpha*w]. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c A is a double precision array of dimension n*n. c On entry A specifies the matrix A c On exit A is unchanged. c c g is a double precision array of dimension n. c On entry g specifies the vector g. c On exit g is unchanged. c c w is a double prevision array of dimension n. c On entry w specifies the search direction. c On exit w is the step s[alpha]. c c ********** */ double one = 1, zero = 0; /* Constant that defines sufficient decrease. */ /* Interpolation factor. */ double mu0 = 0.01, interpf = 0.5; double *wa1 = (double *) xmalloc(sizeof(double)*n); double *wa2 = (double *) xmalloc(sizeof(double)*n); /* Set the initial alpha = 1 because the quadratic function is decreasing in the ray x + alpha*w for 0 <= alpha <= 1 */ double alpha = 1, brptmin, brptmax, gts, q; int search = 1, nbrpt, nsteps = 0, i, inc = 1; /* Find the smallest break-point on the ray x + alpha*w. */ dbreakpt(n, x, xl, xu, w, &nbrpt, &brptmin, &brptmax); /* Reduce alpha until the sufficient decrease condition is satisfied or x + alpha*w is feasible. */ while (search && alpha > brptmin) { /* Calculate P[x + alpha*w] - x and check the sufficient decrease condition. 
*/ nsteps++; dgpstep(n, x, xl, xu, alpha, w, wa1); F77_CALL(dsymv)("U", &n, &one, A, &n, wa1, &inc, &zero, wa2, &inc); gts = F77_CALL(ddot)(&n, g, &inc, wa1, &inc); q = 0.5*F77_CALL(ddot)(&n, wa1, &inc, wa2, &inc) + gts; if (q <= mu0*gts) search = 0; else /* This is a crude interpolation procedure that will be replaced in future versions of the code. */ alpha *= interpf; } /* Force at least one more constraint to be added to the active set if alpha < brptmin and the full step is not successful. There is sufficient decrease because the quadratic function is decreasing in the ray x + alpha*w for 0 <= alpha <= 1. */ if (alpha < 1 && alpha < brptmin) alpha = brptmin; /* Compute the final iterate and step. */ dgpstep(n, x, xl, xu, alpha, w, wa1); F77_CALL(daxpy)(&n, &alpha, w, &inc, x, &inc); for (i=0;i * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/KSpectrumWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef KSPECTRUMWEIGHT_CPP #define KSPECTRUMWEIGHT_CPP #include "kspectrumweight.h" #include /** * K-spectrum weight function. Compute number of common (exactly) k character substring. * * \param floor_len - (IN) Length of floor interval of matched substring. (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. (cf. tau in VisSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode KSpectrumWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. weight = 0.0; if(floor_len < k && x_len >= k) weight = 1.0; // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " weight : " << weight << std::endl; return NOERROR; } #endif //' Question: Why return only 0 or 1? 
//' Answer : In k-spectrum method, any length of matched substring other than k //' does not play a significant role in the string kernel. So, returning 1 //' means that the substring weight equals to # of suffix in the current interval. //' When 0 is returned, it means that substring weight equals to the floor //' interval entry in val[]. (See the definition of substring weight in //' StringKernel.cpp) //' Question: Why is the following a correct implementation of k-spectrum ? //' Answer : [Val precomputation phase] Every Interval with lcp < k has val := 0. //' For intervals with (lcp==k) or (lcp>k but floor_lcp= k but floor interval //' has val := 0 (floor_lcp < k). Hence, returning weight:=1 will make substring //' weight equals to the size of the immediate ceil interval (# of substring in common). kernlab/src/svm.h0000644000175100001440000000235612774400037013465 0ustar hornikusers#ifndef _LIBSVM_H #define _LIBSVM_H #ifdef __cplusplus extern "C" { #endif struct svm_node { int index; double value; }; struct svm_problem { int l, n; double *y; struct svm_node **x; }; enum { C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR, C_BSVC, EPSILON_BSVR, SPOC, KBB }; /* svm_type */ enum { LINEAR, POLY, RBF, SIGMOID, R, LAPLACE, BESSEL, ANOVA, SPLINE }; /* kernel_type */ struct svm_parameter { int svm_type; int kernel_type; int degree; /* for poly */ double gamma; /* for poly/rbf/sigmoid */ double coef0; /* for poly/sigmoid */ /* these are for training only */ double cache_size; /* in MB */ double eps; /* stopping criteria */ double C; /* for C_SVC, EPSILON_SVR and NU_SVR */ int nr_weight; /* for C_SVC */ int *weight_label; /* for C_SVC */ double* weight; /* for C_SVC */ double nu; /* for NU_SVC, ONE_CLASS, and NU_SVR */ double p; /* for EPSILON_SVR */ int shrinking; /* use the shrinking heuristics */ int qpsize; double Cbegin, Cstep; /* for linear kernel */ double lim; /* for bessel kernel */ double *K; /* pointer to kernel matrix */ int m; }; struct BQP { double eps; int n; 
double *x, *C, *Q, *p; }; #ifdef __cplusplus } #endif #endif /* _LIBSVM_H */ kernlab/src/dprecond.c0000644000175100001440000000200012774400037014433 0ustar hornikusers#include #include #include #include /* LAPACK */ /* extern int dpotf2_(char *, int *, double *, int *, int *); */ double dcholfact(int n, double *A, double *L) { /* if A is p.d. , A = L*L' if A is p.s.d. , A + lambda*I = L*L'; */ int indef, i; static double lambda = 1e-3/512/512; memcpy(L, A, sizeof(double)*n*n); F77_CALL(dpotf2)("L", &n, L, &n, &indef); if (indef != 0) { memcpy(L, A, sizeof(double)*n*n); for (i=0;i> 8) | (value << 8)) #define SUFFIX_SORTED 0x80000000 // flag marks suffix as sorted. #define END_OF_CHAIN 0x3ffffffe // marks the end of a chain #define SORTED_BY_ENHANCED_INDUCTION 0x3fffffff // marks suffix which will be sorted by enhanced induction sort. #ifdef SORT_16_BIT_SYMBOLS #define SYMBOL_TYPE unsigned short #else #define SYMBOL_TYPE unsigned char #endif class MSufSort { public: MSufSort(); virtual ~MSufSort(); unsigned int Sort(SYMBOL_TYPE * source, unsigned int sourceLength); unsigned int GetElapsedSortTime(); unsigned int GetMemoryUsage(); unsigned int ISA(unsigned int index); bool VerifySort(); static void ReverseAltSortOrder(SYMBOL_TYPE * data, unsigned int nBytes); private: int CompareStrings(SYMBOL_TYPE * stringA, SYMBOL_TYPE * stringB, int len); bool IsTandemRepeat2(); bool IsTandemRepeat(); void PassTandemRepeat(); bool IsSortedByInduction(); bool IsSortedByEnhancedInduction(unsigned int suffixIndex); void ProcessSuffixesSortedByInduction(); // MarkSuffixAsSorted // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. 
void MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex); void MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex); void MarkSuffixAsSortedByEnhancedInductionSort(unsigned int suffixIndex); // PushNewChainsOntoStack: // Moves all new suffix chains onto the stack of partially sorted // suffixes. (makes them ready for further sub sorting). void PushNewChainsOntoStack(bool originalChains = false); void PushTandemBypassesOntoStack(); // OnSortedSuffix: // Event which is invoked with each sorted suffix at the time of // its sorting. virtual void OnSortedSuffix(unsigned int suffixIndex); // Initialize: // Initializes this object just before sorting begins. void Initialize(); // InitialSort: // This is the first sorting pass which makes the initial suffix // chains from the given source string. Pushes these chains onto // the stack for further sorting. void InitialSort(); // Value16: // Returns the two 8 bit symbols located // at positions N and N + 1 where N = the sourceIndex. unsigned short Value16(unsigned int sourceIndex); // ProcessChain: // Sorts the suffixes of a given chain by the next two symbols of // each suffix in the chain. This creates zero or more new suffix // chains with each sorted by two more symbols than the original // chain. Then pushes these new chains onto the chain stack for // further sorting. void ProcessNextChain(); void AddToSuffixChain(unsigned int suffixIndex, unsigned short suffixChain); void AddToSuffixChain(unsigned int firstSuffixIndex, unsigned int lastSuffixIndex, unsigned short suffixChain); void ProcessSuffixesSortedByEnhancedInduction(unsigned short suffixId); void ResolveTandemRepeatsNotSortedWithInduction(); unsigned int m_sortTime; Stack m_chainMatchLengthStack; Stack m_chainCountStack; Stack m_chainHeadStack; unsigned int m_endOfSuffixChain[0x10000]; unsigned int m_startOfSuffixChain[0x10000]; // m_source: // Address of the string to sort. 
SYMBOL_TYPE * m_source; // m_sourceLength: // The length of the string pointed to by m_source. unsigned int m_sourceLength; unsigned int m_sourceLengthMinusOne; // m_ISA: // The address of the working space which, when the sort is // completed, will contain the inverse suffix array for the // source string. unsigned int * m_ISA; // m_nextSortedSuffixValue: unsigned int m_nextSortedSuffixValue; // unsigned int m_numSortedSuffixes; // m_newChainIds // Array containing the valid chain numbers in m_newChain array. unsigned short m_newChainIds[0x10000]; // m_numNewChains: // The number of new suffix chain ids stored in m_numChainIds. unsigned int m_numNewChains; Stack m_suffixesSortedByInduction; unsigned int m_suffixMatchLength; unsigned int m_currentSuffixIndex; // m_firstSortedPosition: // For use with enhanced induction sorting. unsigned int m_firstSortedPosition[0x10000]; unsigned int m_firstSuffixByEnhancedInductionSort[0x10000]; unsigned int m_lastSuffixByEnhancedInductionSort[0x10000]; unsigned int m_currentSuffixChainId; #ifdef SHOW_PROGRESS // ShowProgress: // Update the progress indicator. void ShowProgress(); // m_nextProgressUpdate: // Indicates when to update the progress indicator. unsigned int m_nextProgressUpdate; // m_progressUpdateIncrement: // Indicates how many suffixes should be sorted before // incrementing the progress indicator. unsigned int m_progressUpdateIncrement; #endif // members used if alternate sorting order should be applied. 
SYMBOL_TYPE m_forwardAltSortOrder[256]; static SYMBOL_TYPE m_reverseAltSortOrder[256]; // for tandem repeat sorting bool m_hasTandemRepeatSortedByInduction; unsigned int m_firstUnsortedTandemRepeat; unsigned int m_lastUnsortedTandemRepeat; bool m_hasEvenLengthTandemRepeats; unsigned int m_tandemRepeatDepth; unsigned int m_firstSortedTandemRepeat; unsigned int m_lastSortedTandemRepeat; unsigned int m_tandemRepeatLength; }; //inline unsigned short MSufSort::Value16(unsigned int sourceIndex) //{ // return (sourceIndex < m_sourceLengthMinusOne) ? *(unsigned short *)(m_source + sourceIndex) : m_source[sourceIndex]; //} // fix by Brian Ripley inline unsigned short MSufSort::Value16(unsigned int sourceIndex) { union {unsigned short u; unsigned char b[2];} u16; u16.b[0] = m_source[sourceIndex]; u16.b[1] = (sourceIndex < m_sourceLengthMinusOne) ? m_source[sourceIndex + 1] : 0; return u16.u; } inline bool MSufSort::IsSortedByInduction() { unsigned int n = m_currentSuffixIndex + m_suffixMatchLength - 1; #ifndef USE_INDUCTION_SORTING if (n < m_sourceLengthMinusOne) return false; #endif if ((m_ISA[n] & SUFFIX_SORTED) && ((m_ISA[n] & 0x3fffffff) < m_nextSortedSuffixValue)) { InductionSortObject i(0, m_ISA[n], m_currentSuffixIndex); m_suffixesSortedByInduction.Push(i); } else if ((m_ISA[n + 1] & SUFFIX_SORTED) && ((m_ISA[n + 1] & 0x3fffffff) < m_nextSortedSuffixValue)) { InductionSortObject i(1, m_ISA[n + 1], m_currentSuffixIndex); m_suffixesSortedByInduction.Push(i); } else return false; return true; } inline bool MSufSort::IsSortedByEnhancedInduction(unsigned int suffixIndex) { if (suffixIndex > 0) if (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION) return true; return false; } inline bool MSufSort::IsTandemRepeat() { #ifndef USE_TANDEM_REPEAT_SORTING return false; #else if ((!m_tandemRepeatDepth) && (m_currentSuffixIndex + m_suffixMatchLength) == (m_ISA[m_currentSuffixIndex] + 1)) return true; #ifndef SORT_16_BIT_SYMBOLS if ((!m_tandemRepeatDepth) && 
((m_currentSuffixIndex + m_suffixMatchLength) == (m_ISA[m_currentSuffixIndex]))) { m_hasEvenLengthTandemRepeats = true; return false; } #endif return false; #endif } inline void MSufSort::PassTandemRepeat() { unsigned int nextIndex; unsigned int lastIndex; // unsigned int firstIndex = m_currentSuffixIndex; while ((m_currentSuffixIndex + m_suffixMatchLength) == ((nextIndex = m_ISA[m_currentSuffixIndex]) + 1)) { lastIndex = m_currentSuffixIndex; m_currentSuffixIndex = nextIndex; } if (IsSortedByInduction()) { m_hasTandemRepeatSortedByInduction = true; m_currentSuffixIndex = m_ISA[m_currentSuffixIndex]; } else { if (m_firstUnsortedTandemRepeat == END_OF_CHAIN) m_firstUnsortedTandemRepeat = m_lastUnsortedTandemRepeat = lastIndex; else m_lastUnsortedTandemRepeat = (m_ISA[m_lastUnsortedTandemRepeat] = lastIndex); } } inline void MSufSort::PushNewChainsOntoStack(bool originalChains) { // Moves all new suffix chains onto the stack of partially sorted // suffixes. (makes them ready for further sub sorting). 
#ifdef SORT_16_BIT_SYMBOLS unsigned int newSuffixMatchLength = m_suffixMatchLength + 1; #else unsigned int newSuffixMatchLength = m_suffixMatchLength + 2; #endif if (m_numNewChains) { if (m_hasEvenLengthTandemRepeats) { m_chainCountStack.Push(m_numNewChains - 1); m_chainMatchLengthStack.Push(newSuffixMatchLength); m_chainCountStack.Push(1); m_chainMatchLengthStack.Push(newSuffixMatchLength - 1); } else { m_chainCountStack.Push(m_numNewChains); m_chainMatchLengthStack.Push(newSuffixMatchLength); } if (m_numNewChains > 1) IntroSort(m_newChainIds, m_numNewChains); while (m_numNewChains) { unsigned short chainId = m_newChainIds[--m_numNewChains]; chainId = ENDIAN_SWAP_16(chainId); // unsigned int n = m_startOfSuffixChain[chainId]; m_chainHeadStack.Push(m_startOfSuffixChain[chainId]); m_startOfSuffixChain[chainId] = END_OF_CHAIN; m_ISA[m_endOfSuffixChain[chainId]] = END_OF_CHAIN; } } m_hasEvenLengthTandemRepeats = false; if (m_firstUnsortedTandemRepeat != END_OF_CHAIN) { // Tandem repeats with a terminating suffix that did not get // sorted via induction has occurred (at least once). // We have a suffix chain (indicated by m_firstTandemRepeatWithoutSuffix) // of the suffix in each tandem repeat which immediately proceeded the // terminating suffix in each chain. We want to sort them relative to // each other and then process the tandem repeats. 
unsigned int tandemRepeatLength = m_suffixMatchLength - 1; unsigned int numChains = m_chainHeadStack.Count(); m_chainHeadStack.Push(m_firstUnsortedTandemRepeat); m_chainCountStack.Push(1); m_chainMatchLengthStack.Push((m_suffixMatchLength << 1) - 1); m_ISA[m_lastUnsortedTandemRepeat] = END_OF_CHAIN; m_firstUnsortedTandemRepeat = END_OF_CHAIN; m_tandemRepeatDepth = 1; while (m_chainHeadStack.Count() > numChains) ProcessNextChain(); m_suffixMatchLength = tandemRepeatLength + 1; ResolveTandemRepeatsNotSortedWithInduction(); m_tandemRepeatDepth = 0; } } inline void MSufSort::AddToSuffixChain(unsigned int suffixIndex, unsigned short suffixChain) { if (m_startOfSuffixChain[suffixChain] == END_OF_CHAIN) { m_endOfSuffixChain[suffixChain] = m_startOfSuffixChain[suffixChain] = suffixIndex; m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(suffixChain); } else m_endOfSuffixChain[suffixChain] = m_ISA[m_endOfSuffixChain[suffixChain]] = suffixIndex; } inline void MSufSort::AddToSuffixChain(unsigned int firstSuffixIndex, unsigned int lastSuffixIndex, unsigned short suffixChain) { if (m_startOfSuffixChain[suffixChain] == END_OF_CHAIN) { m_startOfSuffixChain[suffixChain] = firstSuffixIndex; m_endOfSuffixChain[suffixChain] = lastSuffixIndex; m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(suffixChain); } else { m_ISA[m_endOfSuffixChain[suffixChain]] = firstSuffixIndex; m_endOfSuffixChain[suffixChain] = lastSuffixIndex; } } inline void MSufSort::OnSortedSuffix(unsigned int suffixIndex) { // Event which is invoked with each sorted suffix at the time of // its sorting. m_numSortedSuffixes++; #ifdef SHOW_PROGRESS if (m_numSortedSuffixes >= m_nextProgressUpdate) { m_nextProgressUpdate += m_progressUpdateIncrement; ShowProgress(); } #endif } #ifdef SORT_16_BIT_SYMBOLS inline void MSufSort::MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. 
if (m_tandemRepeatDepth) { // we are processing a list of suffixes which we the second to last in tandem repeats // that were not terminated via induction. These suffixes are not actually to be // marked as sorted yet. Instead, they are to be linked together in sorted order. if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; unsigned short symbol = Value16(suffixIndex); m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; symbol = ENDIAN_SWAP_16(symbol); if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; } } } #endif } inline void MSufSort::MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. if (m_tandemRepeatDepth) { // we are processing a list of suffixes which we the second to last in tandem repeats // that were not terminated via induction. These suffixes are not actually to be // marked as sorted yet. Instead, they are to be linked together in sorted order. 
if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { unsigned short symbol = Value16(suffixIndex); symbol = ENDIAN_SWAP_16(symbol); suffixIndex--; if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; } } #endif } #else inline void MSufSort::MarkSuffixAsSorted(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. if (m_tandemRepeatDepth) { // we are processing a list of suffixes which we the second to last in tandem repeats // that were not terminated via induction. These suffixes are not actually to be // marked as sorted yet. Instead, they are to be linked together in sorted order. 
if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; unsigned short symbol = Value16(suffixIndex); m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { suffixIndex--; unsigned short symbol2 = symbol; symbol = Value16(suffixIndex); m_ISA[suffixIndex] = (m_firstSortedPosition[symbol]++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { if (m_source[suffixIndex] < m_source[suffixIndex + 1]) symbol2 = ENDIAN_SWAP_16(symbol); else symbol2 = ENDIAN_SWAP_16(symbol2); suffixIndex--; if (m_firstSuffixByEnhancedInductionSort[symbol2] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol2] = m_lastSuffixByEnhancedInductionSort[symbol2] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol2]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol2] = suffixIndex; } } } } #endif } inline void MSufSort::MarkSuffixAsSorted2(unsigned int suffixIndex, unsigned int & sortedIndex) { // Sets the final inverse suffix array value for a given suffix. // Also invokes the OnSortedSuffix member function. if (m_tandemRepeatDepth) { // we are processing a list of suffixes which we the second to last in tandem repeats // that were not terminated via induction. These suffixes are not actually to be // marked as sorted yet. Instead, they are to be linked together in sorted order. 
if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = suffixIndex; else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = suffixIndex); return; } m_ISA[suffixIndex] = (sortedIndex++ | SUFFIX_SORTED); #ifdef SHOW_PROGRESS OnSortedSuffix(suffixIndex); #endif #ifdef USE_ENHANCED_INDUCTION_SORTING if ((suffixIndex) && (m_ISA[suffixIndex - 1] == SORTED_BY_ENHANCED_INDUCTION)) { unsigned short symbol; if (m_source[suffixIndex] < m_source[suffixIndex + 1]) symbol = Value16(suffixIndex); else symbol = Value16(suffixIndex + 1); symbol = ENDIAN_SWAP_16(symbol); suffixIndex--; if (m_firstSuffixByEnhancedInductionSort[symbol] == END_OF_CHAIN) m_firstSuffixByEnhancedInductionSort[symbol] = m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; else { m_ISA[m_lastSuffixByEnhancedInductionSort[symbol]] = suffixIndex; m_lastSuffixByEnhancedInductionSort[symbol] = suffixIndex; } } #endif } #endif inline void MSufSort::ProcessNextChain() { // Sorts the suffixes of a given chain by the next two symbols of // each suffix in the chain. This creates zero or more new suffix // chains with each sorted by two more symbols than the original // chain. Then pushes these new chains onto the chain stack for // further sorting. while (--m_chainCountStack.Top() < 0) { m_chainCountStack.Pop(); m_chainMatchLengthStack.Pop(); } m_suffixMatchLength = m_chainMatchLengthStack.Top(); m_currentSuffixIndex = m_chainHeadStack.Pop(); #ifdef USE_ENHANCED_INDUCTION_SORTING if (m_chainMatchLengthStack.Count() == 1) { // one of the original buckets from InitialSort(). This is important // when enhanced induction sorting is enabled. 
unsigned short chainId = Value16(m_currentSuffixIndex); unsigned short temp = chainId; chainId = ENDIAN_SWAP_16(chainId); while (m_currentSuffixChainId <= chainId) ProcessSuffixesSortedByEnhancedInduction(m_currentSuffixChainId++); m_nextSortedSuffixValue = m_firstSortedPosition[temp]; } #endif if (m_ISA[m_currentSuffixIndex] == END_OF_CHAIN) MarkSuffixAsSorted(m_currentSuffixIndex, m_nextSortedSuffixValue); // only one suffix in bucket so it is sorted. else { do { if (IsTandemRepeat()) PassTandemRepeat(); else if ((m_currentSuffixIndex != END_OF_CHAIN) && (IsSortedByInduction())) m_currentSuffixIndex = m_ISA[m_currentSuffixIndex]; else { unsigned int firstSuffixIndex = m_currentSuffixIndex; unsigned int lastSuffixIndex = m_currentSuffixIndex; unsigned short targetSymbol = Value16(m_currentSuffixIndex + m_suffixMatchLength); unsigned int nextSuffix; do { nextSuffix = m_ISA[lastSuffixIndex = m_currentSuffixIndex]; if ((m_currentSuffixIndex = nextSuffix) == END_OF_CHAIN) break; else if (IsTandemRepeat()) { PassTandemRepeat(); break; } else if (IsSortedByInduction()) { m_currentSuffixIndex = m_ISA[nextSuffix]; break; } } while (Value16(m_currentSuffixIndex + m_suffixMatchLength) == targetSymbol); AddToSuffixChain(firstSuffixIndex, lastSuffixIndex, targetSymbol); } } while (m_currentSuffixIndex != END_OF_CHAIN); ProcessSuffixesSortedByInduction(); PushNewChainsOntoStack(); } } inline void MSufSort::ProcessSuffixesSortedByInduction() { unsigned int numSuffixes = m_suffixesSortedByInduction.Count(); if (numSuffixes) { InductionSortObject * objects = m_suffixesSortedByInduction.m_stack; if (numSuffixes > 1) IntroSort(objects, numSuffixes); if (m_hasTandemRepeatSortedByInduction) { // During the last pass some suffixes which were sorted via induction were also // determined to be the terminal suffix in a tandem repeat. 
So when we mark // the suffixes as sorted (where were sorted via induction) we make chain together // the preceding suffix in the tandem repeat (if there is one). unsigned int firstTandemRepeatIndex = END_OF_CHAIN; unsigned int lastTandemRepeatIndex = END_OF_CHAIN; unsigned int tandemRepeatLength = m_suffixMatchLength - 1; m_hasTandemRepeatSortedByInduction = false; for (unsigned int i = 0; i < numSuffixes; i++) { unsigned int suffixIndex = (objects[i].m_sortValue[1] & 0x3fffffff); if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex)) { // this suffix was a terminating suffix in a tandem repeat. // add the preceding suffix in the tandem repeat to the list. if (firstTandemRepeatIndex == END_OF_CHAIN) firstTandemRepeatIndex = lastTandemRepeatIndex = (suffixIndex - tandemRepeatLength); else lastTandemRepeatIndex = (m_ISA[lastTandemRepeatIndex] = (suffixIndex - tandemRepeatLength)); } MarkSuffixAsSorted(suffixIndex, m_nextSortedSuffixValue); } // now process each suffix in the tandem repeat list making each as sorted. // build a new list for tandem repeats which preceded each in the list until there are // no preceding tandem suffix for any suffix in the list. while (firstTandemRepeatIndex != END_OF_CHAIN) { m_ISA[lastTandemRepeatIndex] = END_OF_CHAIN; unsigned int suffixIndex = firstTandemRepeatIndex; firstTandemRepeatIndex = END_OF_CHAIN; while (suffixIndex != END_OF_CHAIN) { if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex)) { // this suffix was a terminating suffix in a tandem repeat. // add the preceding suffix in the tandem repeat to the list. 
if (firstTandemRepeatIndex == END_OF_CHAIN) firstTandemRepeatIndex = lastTandemRepeatIndex = (suffixIndex - tandemRepeatLength); else lastTandemRepeatIndex = (m_ISA[lastTandemRepeatIndex] = (suffixIndex - tandemRepeatLength)); } unsigned int nextSuffix = m_ISA[suffixIndex]; MarkSuffixAsSorted(suffixIndex, m_nextSortedSuffixValue); suffixIndex = nextSuffix; } } // finished. } else { // This is the typical branch on the condition. There were no tandem repeats // encountered during the last chain that were terminated with a suffix that // was sorted via induction. In this case we just mark the suffixes as sorted // and we are done. for (unsigned int i = 0; i < numSuffixes; i++) MarkSuffixAsSorted(objects[i].m_sortValue[1] & 0x3fffffff, m_nextSortedSuffixValue); } m_suffixesSortedByInduction.Clear(); } } inline void MSufSort::ProcessSuffixesSortedByEnhancedInduction(unsigned short suffixId) { // if (m_firstSuffixByEnhancedInductionSort[suffixId] != END_OF_CHAIN) { unsigned int currentSuffixIndex = m_firstSuffixByEnhancedInductionSort[suffixId]; unsigned int lastSuffixIndex = m_lastSuffixByEnhancedInductionSort[suffixId]; m_firstSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; m_lastSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; do { unsigned short symbol = Value16(currentSuffixIndex); unsigned int nextIndex = m_ISA[currentSuffixIndex]; MarkSuffixAsSorted2(currentSuffixIndex, m_firstSortedPosition[symbol]); if (currentSuffixIndex == lastSuffixIndex) { if (m_firstSuffixByEnhancedInductionSort[suffixId] == END_OF_CHAIN) return; currentSuffixIndex = m_firstSuffixByEnhancedInductionSort[suffixId]; lastSuffixIndex = m_lastSuffixByEnhancedInductionSort[suffixId]; m_firstSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; m_lastSuffixByEnhancedInductionSort[suffixId] = END_OF_CHAIN; } else currentSuffixIndex = nextIndex; } while (true); } } #ifdef SHOW_PROGRESS inline void MSufSort::ShowProgress() { // Update the progress indicator. 
//double p = ((double)(m_numSortedSuffixes & 0x3fffffff) / m_sourceLength) * 100; // printf("Progress: %.2f%% %c", p, 13); } #endif #endif kernlab/src/wkasailcp.h0000644000175100001440000000337712774400037014642 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_kasai_lcp.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef W_KASAI_LCP_H #define W_KASAI_LCP_H #include "datatype.h" #include "errorcode.h" #include "ilcpfactory.h" #include "lcp.h" /** * Kasai et al's LCP array computation algorithm is * is slightly faster than Manzini's algorithm. However, * it needs inverse suffix array which costs extra memory. */ class W_kasai_lcp : public I_LCPFactory { public: /// Constructor W_kasai_lcp(){} /// Desctructor virtual ~W_kasai_lcp(){} /// Compute LCP array. 
ErrorCode ComputeLCP(const SYMBOL *text, const UInt32 &len, const UInt32 *sa, LCP& lcp); }; #endif kernlab/src/stringkernel.cpp0000644000175100001440000003370712774400037015726 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/StringKernel.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 10 Aug 2006 // 11 Oct 2006 #ifndef STRINGKERNEL_CPP #define STRINGKERNEL_CPP #include #include #include #include #include #include #include #include #include "stringkernel.h" StringKernel::StringKernel(): esa(0), weigher(0), val(0), lvs(0) {} /** * Construct string kernel given constructed enhanced suffix array. * * \param esa_ - ESA instance. 
*/ StringKernel::StringKernel(ESA *esa_, int weightfn, Real param, int verb): esa(esa_), val(new Real[esa_->size + 1]), lvs(0), _verb(verb) { switch (weightfn) { case CONSTANT: weigher = new ConstantWeight(); break; case EXPDECAY: weigher = new ExpDecayWeight(param); break; case KSPECTRUM: weigher = new KSpectrumWeight(param); break; case BOUNDRANGE: weigher = new BoundedRangeWeight(param); break; default: weigher = new ConstantWeight(); //int nothing = 0; } } /** * Construct string kernel when given only text and its length. * * \param text - (IN) The text which SuffixArray and StringKernel correspond to. * \param text_length - (IN) The length of #_text#. * \param verb - (IN) Verbosity level. */ StringKernel::StringKernel(const UInt32 &size, SYMBOL *text, int weightfn, Real param, int verb): lvs(0), _verb(verb) { // Build ESA. esa = new ESA(size, text, verb); // Allocate memory space for #val# val = new Real[esa->size + 1]; // Instantiate weigher. switch (weightfn) { case CONSTANT: weigher = new ConstantWeight(); break; case EXPDECAY: weigher = new ExpDecayWeight(param); break; case KSPECTRUM: weigher = new KSpectrumWeight(param); break; case BOUNDRANGE: weigher = new BoundedRangeWeight(param); break; default: weigher = new ConstantWeight(); //int nothing = 0; } } /** * StringKernel destructor. * */ StringKernel::~StringKernel() { //' Delete objects and release allocated memory space. if (esa) { delete esa; esa = 0; } if (val) { delete [] val; val = 0; } if (lvs) { delete [] lvs; lvs = 0; } if (weigher) { delete weigher; weigher = 0; } } /** * An Iterative auxiliary function used in PrecomputeVal(). * * Note: Every lcp-interval can be represented by its first l-index. * Hence, 'val' is stored in val[] at the index := first l-index. * * Pre: val[] is initialised to 0. 
* * @param left Left bound of current interval * @param right Right bound of current interval */ void StringKernel::IterativeCompute(const UInt32 &left, const UInt32 &right) { //std::cout << "In IterativeCompute() " << std::endl; //' Variables queue > q; vector > childlist; pair p; UInt32 lb = 0; UInt32 rb = 0; UInt32 floor_len = 0; UInt32 x_len = 0; Real cur_val = 0.0; Real edge_weight = 0.0; //' Step 1: At root, 0-[0..size-1]. Store all non-single child-intervals onto #q#. lb = left; //' Should be equal to 0. rb = right; //' Should be equal to size-1. esa->GetChildIntervals(lb, rb, childlist); for (UInt32 jj = 0; jj < childlist.size(); jj++) q.push(childlist[jj]); //' Step 2: Do breadth-first traversal. For every interval, compute val and add //' it to all its non-singleton child-intervals' val-entries in val[]. //' Start with child-interval [i..j] of 0-[0..size-1]. //' assert(j != size-1) while (!q.empty()) { //' Step 2.1: Get an interval from queue, #q#. p = q.front(); q.pop(); //' step 2.2: Get the lcp of floor interval. UInt32 a = 0, b = 0; a = esa->lcptab[p.first]; //svnvish: BUGBUG // Glorious hack. We have to remove it later. // This gives the lcp of parent interval if (p.second < esa->size - 1) { b = esa->lcptab[p.second + 1]; } else { b = 0; } floor_len = (a > b) ? a : b; //' Step 2.3: Get the lcp of current interval. esa->GetLcp(p.first, p.second, x_len); //' Step 2.4: Compute val of current interval. weigher->ComputeWeight(floor_len, x_len, edge_weight); cur_val = edge_weight * (lvs[p.second + 1] - lvs[p.first]); //' Step 2.5: Add #cur_val# to val[]. UInt32 firstlIndex1 = 0; esa->childtab.l_idx(p.first, p.second, firstlIndex1); val[firstlIndex1] += cur_val; // std::cout << "p.first:"<GetChildIntervals(p.first, p.second, childlist); //' Step 2.7: (a) Add #cur_val# to child-intervals' val-entries in val[]. //' (b) Push child-interval onto #q#. 
for (UInt32 kk = 0; kk < childlist.size(); kk++) { //' (a) UInt32 firstlIndex2 = 0; pair tmp_p = childlist[kk]; if (esa->text[esa->suftab[tmp_p.first]] == SENTINEL) continue; esa->childtab.l_idx(tmp_p.first, tmp_p.second, firstlIndex2); // assert( val[firstlIndex2] == 0 ); val[firstlIndex2] = val[firstlIndex1]; // cur_val; //' (b) q.push(make_pair(tmp_p.first, tmp_p.second)); } } //std::cout << "Out IterativeCompute() " << std::endl; } /** * Precomputation of val(t) of string kernel. * Observation :Every internal node of a suffix tree can be represented by at * least one index of the corresponding lcp array. So, the val * of a node is stored in val[] at the index corresponding to that of * the fist representative lcp value in lcp[]. */ void StringKernel::PrecomputeVal() { //' Memory space requirement check. assert(val != 0); //' Initialise all val entries to zero! memset(val, 0, sizeof(Real)*esa->size + 1); //' Start iterative precomputation of val[] IterativeCompute(0, esa->size - 1); } /** * Compute k(text,x) by performing Chang and Lawler's matching statistics collection * algorithm on the enhanced suffix array. * * \param x - (IN) The input string which is to be evaluated together with * the text in esa. * \param x_len - (IN) The length of #x#. * \param value - (IN) The value of k(x,x'). 
*/ void StringKernel::Compute_K(SYMBOL *x, const UInt32 &x_len, Real &value) { //' Variables UInt32 floor_i = 0; UInt32 floor_j = 0; UInt32 i = 0; UInt32 j = 0; UInt32 lb = 0; UInt32 rb = 0; UInt32 matched_len = 0; UInt32 offset = 0; UInt32 floor_len = 0; UInt32 firstlIndex = 0; Real edge_weight = 0.0; //' Initialisation value = 0.0; lb = 0; rb = esa->size - 1; //' for each suffix, xprime[k..xprime_len-1], find longest match in text for (UInt32 k = 0; k < x_len; k++) { //' Step 1: Matching esa->ExactSuffixMatch(lb, rb, offset, &x[k], x_len - k, i, j, matched_len, floor_i, floor_j, floor_len); //' Step 2: Get suffix link for [floor_i..floor_j] esa->GetSuflink(floor_i, floor_j, lb, rb); assert((floor_j - floor_i) <= (rb - lb)); //' Range check //' Step 3: Compute contribution of this matched substring esa->childtab.l_idx(floor_i, floor_j, firstlIndex); assert(firstlIndex > floor_i && firstlIndex <= floor_j); assert(floor_len <= matched_len); weigher->ComputeWeight(floor_len, matched_len, edge_weight); value += val[firstlIndex] + edge_weight * (lvs[j + 1] - lvs[i]); // std::cout << "i:"<size); //' Allocate memory space for lvs[] lvs = new (nothrow) Real[esa->size + 1]; assert(lvs); //' Assign leaf weight to lvs element according to its position in text. for (UInt32 j = 0; j < esa->size; j++) { pos = esa->suftab[j]; UInt32 *p = upper_bound(clen, clen + m, pos); //' O(log n) lvs[j + 1] = leafWeight[p - clen]; } //' Compute cumulative lvs[]. To be used in matching statistics computation later. lvs[0] = 0.0; partial_sum(lvs, lvs + esa->size + 1, lvs); //chteo: [101006] delete [] clen; clen = 0; } /** * Set lvs[i] = i, for i = 0 to esa->size * Memory space for lvs[] will be allocated. */ void StringKernel::Set_Lvs() { //' Clean up previous lvs, if any. if (lvs) { delete lvs; lvs = 0; } //' Allocate memory space for lvs[] lvs = new (nothrow) Real[esa->size + 1]; //' Check if memory correctly allocated. 
assert(lvs != 0); //' Range := [0..esa->size] UInt32 localsize = esa->size; for (UInt32 i = 0; i <= localsize; i++) lvs[i] = i; } #endif #include #include #include extern "C" { SEXP stringtv(SEXP rtext, // text document SEXP ltext, // list or vector of text documents to compute kvalues against SEXP nltext, // number of text documents in ltext SEXP vnchar, // number of characters in text SEXP vnlchar, // characters per document in ltext SEXP stype, // type of kernel SEXP param) // parameter for kernel { // R interface for text and list of text computation. Should return a vector of computed kernel values. // Construct ESASK UInt32 text_size = *INTEGER(vnchar); int number_ltext = *INTEGER(nltext); unsigned int *ltext_size = (unsigned int *) malloc (sizeof(unsigned int) * number_ltext); memcpy(ltext_size, INTEGER(vnlchar), number_ltext*sizeof(int)); int weightfn = *INTEGER(stype); const char *text = CHAR(STRING_ELT(rtext,0)); Real kparam = *REAL(param); double kVal; SEXP alpha; PROTECT(alpha = allocVector(REALSXP, number_ltext)); // Check if stringlength reported from R is correct if(strlen(text)!= text_size) text_size= strlen(text); StringKernel sk(text_size, (SYMBOL*)text, (weightfn - 1), kparam, 0); sk.Set_Lvs(); sk.PrecomputeVal(); for (int i=0; i * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ChildTable.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef CTABLE_H #define CTABLE_H #include #include #include "datatype.h" #include "errorcode.h" #include "lcp.h" // using namespace std; /** * ChildTable represents the parent-child relationship between * the lcp-intervals of suffix array. * Reference: AboKurOhl04 */ class ChildTable : public std::vector { private: // childtab needs lcptab to differentiate between up, down, and // nextlIndex values. 
LCP& _lcptab; public: // Constructors ChildTable(const UInt32 &size, LCP& lcptab): std::vector(size), _lcptab(lcptab){} // Destructor virtual ~ChildTable() {} // Get first l-index of an l-[i..j] interval ErrorCode l_idx(const UInt32 &i, const UInt32 &j, UInt32 &idx); // .up field ErrorCode up(const UInt32 &idx, UInt32 &val); // .down field ErrorCode down(const UInt32 &idx, UInt32 &val); // .next field can be retrieved by accessing the array directly. friend std::ostream& operator << (std::ostream& os, const ChildTable& ct); }; #endif kernlab/src/brweight.h0000644000175100001440000000325412774400037014471 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/BoundedRangeWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef BRWEIGHT_H #define BRWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include //' Bounded Range weight class class BoundedRangeWeight : public I_WeightFactory { Real n; public: /// Constructor BoundedRangeWeight(const Real &n_=1): n(n_){} /// Destructor virtual ~BoundedRangeWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/src/isafactory.h0000644000175100001440000000306412774400037015021 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_SAFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 //' Interface for Enhanced Suffix Array construction algorithms #ifndef I_SAFACTORY_H #define I_SAFACTORY_H #include "datatype.h" #include "errorcode.h" class I_SAFactory { public: ///Constructor I_SAFactory(){} ///Destructor virtual ~I_SAFactory(){} ///Methods virtual ErrorCode ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array) = 0; }; #endif kernlab/src/inductionsort.cpp0000644000175100001440000000264612774400037016121 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #include "inductionsort.h" InductionSortObject::InductionSortObject(unsigned int inductionPosition, unsigned int inductionValue, unsigned int suffixIndex) { // sort value is 64 bits long. // bits are ... 
// 63 - 60: induction position (0 - 15) // 59 - 29: induction value at induction position (0 - (2^30 -1)) // 28 - 0: suffix index for the suffix sorted by induction (0 - (2^30) - 1) m_sortValue[0] = inductionPosition << 28; m_sortValue[0] |= ((inductionValue & 0x3fffffff) >> 2); m_sortValue[1] = (inductionValue << 30); m_sortValue[1] |= suffixIndex; } kernlab/src/lcp.cpp0000644000175100001440000001264312774400037013771 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/LCP.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 11 Oct 2006 #ifndef LCP_CPP #define LCP_CPP #include "lcp.h" // Threshold for compacting LCP[] const Real THRESHOLD = 0.3; LCP::LCP(const UInt32 &size): _p_array(0), _idx_array(0), _val_array(0), _size(size), _is_compact(false), _beg(0), _end(0), _cache(0), _dist(0), array(new UInt32[size]){ } LCP::~LCP() { if(array) {delete [] array; array = 0;} if(_p_array) {delete [] _p_array; _p_array = 0;} if(_idx_array) {delete [] _idx_array; _idx_array = 0;} if(_val_array) {delete [] _val_array; _val_array = 0;} } /** * Compact initial/original lcp array of n elements (i.e. 4n bytes) * into a n byte array with 8 bytes of secondary storage. * */ ErrorCode LCP::compact(void){ // Validate pre-conditions //assert(!array.empty() && array.size() == _size); assert(array); // Already compact. Nothing to do if (_is_compact) return NOERROR; // Count number of lcp-values >= 255. // UInt32 idx_len = std::count_if(array.begin(), array.end(), // std::bind2nd(std::greater(),254)); #ifdef _RWSTD_NO_CLASS_PARTIAL_SPEC UInt32 idx_len = 0; std::count_if(array, array + _size, std::bind2nd(std::greater(),254), idx_len); #else UInt32 idx_len = std::count_if(array, array + _size, std::bind2nd(std::greater(),254)); #endif // Compact iff idx_len/|array| > THRESHOLD if((Real)idx_len/_size > THRESHOLD) { //std::cout<< "Not compacting " << std::endl; return NOERROR; } // std::cout<< "Compacting with : " << idx_len << std::endl; // We know how much space to use // _p_array.resize(_size); // _idx_array.resize(idx_len); // _val_array.resize(idx_len); _p_array = new Byte1[_size]; _idx_array = new UInt32[idx_len]; _val_array = new UInt32[idx_len]; // Hold pointers for later. 
Avoids function calls // _beg = _idx_array.begin(); // _end = _idx_array.end(); // _cache = _idx_array.begin(); _beg = _idx_array; _end = _idx_array + idx_len; _cache = _idx_array; _dist = 0; for(UInt32 i=0, j=0; i<_size; i++) { if(array[i] < 255){ _p_array[i] = array[i]; }else { _p_array[i] = 255; _idx_array[j] = i; _val_array[j] = array[i]; j++; } } //array.resize(0); // array.clear(); delete [] array; array = 0; _is_compact = true; return NOERROR; } /** * Retrieve lcp array values. * * \param idx - (IN) Index of lcp array */ UInt32 LCP::operator [] (const UInt32 &idx) { // input is valid? // assert (idx >= 0 && idx < _size); if(!_is_compact){ // LCP array has not been compacted yet! return array[idx]; } if(_p_array[idx] < 255){ // Found in primary index return (UInt32) _p_array[idx]; } // svnvish: BUGBUG // Do some caching here. // // Now search in secondary index as last resort // std::pair< const_itr, const_itr > p = equal_range(_beg, _end, idx); // return _val_array[std::distance(_beg, p.first)]; if (++_cache == _end){ _cache = _beg; _dist = 0; }else{ _dist++; } UInt32 c_idx = *(_cache); if (c_idx == idx){ return _val_array[_dist]; } // _cache = std::equal_range(_beg, _end, idx).first; _cache = std::lower_bound(_beg, _end, idx); #ifdef _RWSTD_NO_CLASS_PARTIAL_SPEC _dist = 0; std::distance(_beg, _cache, _dist); #else _dist = std::distance(_beg, _cache); #endif //std::cout << "here" << std::endl; // _cache = equal_range(_beg, _end, idx).first; // _dist = std::distance(_beg, _cache); return _val_array[_dist]; // if (c_idx > idx){ // _cache = equal_range(_beg, _cache, idx).first; // }else{ // _cache = equal_range(_cache, _end, idx).first; // } // //_cache = p.first; // _dist = std::distance(_beg, _cache); // return _val_array[_dist]; } /** * Dump array elements to output stream. * * \param os - (IN) Output stream * \param lcp - (IN) LCP object. 
*/ std::ostream& operator << (std::ostream& os, LCP& lcp){ for( UInt32 i = 0; i < lcp._size; i++ ){ os << "lcp[ " << i << "]: " << lcp[i] << std::endl; } return os; } #endif kernlab/src/wmsufsort.h0000644000175100001440000000347512774400037014734 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/W_msufsort.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 13 Jul 2007 : use MSufSort v3.1 instead of v2.2 // Wrapper for Michael Maniscalco's MSufSort version 3.1 algorithm #ifndef W_MSUFSORT_H #define W_MSUFSORT_H #include "datatype.h" #include "isafactory.h" #include "msufsort.h" class W_msufsort : public I_SAFactory { public: ///Variables //'Declaration of object POINTERS, no initialization needed. //'If Declaration of objects, initialize them in member initialization list. 
MSufSort *msuffixsorter; ///Constructor W_msufsort(); ///Destructor virtual ~W_msufsort(); ///Methods ErrorCode ConstructSA(SYMBOL *text, const UInt32 &len, UInt32 *&array); }; #endif kernlab/src/svm.cpp0000644000175100001440000025315612774400037014026 0ustar hornikusers#include #include #include #include #include #include #include #include #include #include #include "svm.h" typedef float Qfloat; typedef signed char schar; #ifndef min template inline T min(T x,T y) { return (x inline T max(T x,T y) { return (x>y)?x:y; } #endif template inline void swap(T& x, T& y) { T t=x; x=y; y=t; } template inline void clone(T*& dst, S* src, int n) { dst = new T[n]; memcpy((void *)dst,(void *)src,sizeof(T)*n); } inline double powi(double base, int times) { double tmp = base, ret = 1.0; for(int t=times; t>0; t/=2) { if(t%2==1) ret*=tmp; tmp = tmp * tmp; } return ret; } #define INF HUGE_VAL # define TAU 1e-12 #define Malloc(type,n) (type *)malloc((n)*sizeof(type)) #if 0 void info(char *fmt,...) { va_list ap; va_start(ap,fmt); //vprintf(fmt,ap); va_end(ap); } void info_flush() { fflush(stdout); } #else void info(char *fmt,...) 
{} void info_flush() {} #endif // // Kernel Cache // // l is the number of total data items // size is the cache size limit in bytes // class Cache { public: Cache(int l,long int size, int qpsize); ~Cache(); // request data [0,len) // return some position p where [p,len) need to be filled // (p >= len if nothing needs to be filled) int get_data(const int index, Qfloat **data, int len); void swap_index(int i, int j); // future_option private: int l; long int size; struct head_t { head_t *prev, *next; // a cicular list Qfloat *data; int len; // data[0,len) is cached in this entry }; head_t *head; head_t lru_head; void lru_delete(head_t *h); void lru_insert(head_t *h); }; Cache::Cache(int l_,long int size_,int qpsize):l(l_),size(size_) { head = (head_t *)calloc(l,sizeof(head_t)); // initialized to 0 size /= sizeof(Qfloat); size -= l * sizeof(head_t) / sizeof(Qfloat); size = max(size, (long int) qpsize*l); // cache must be large enough for 'qpsize' columns lru_head.next = lru_head.prev = &lru_head; } Cache::~Cache() { for(head_t *h = lru_head.next; h != &lru_head; h=h->next) free(h->data); free(head); } void Cache::lru_delete(head_t *h) { // delete from current location h->prev->next = h->next; h->next->prev = h->prev; } void Cache::lru_insert(head_t *h) { // insert to last position h->next = &lru_head; h->prev = lru_head.prev; h->prev->next = h; h->next->prev = h; } int Cache::get_data(const int index, Qfloat **data, int len) { head_t *h = &head[index]; if(h->len) lru_delete(h); int more = len - h->len; if(more > 0) { // free old space while(size < more) { head_t *old = lru_head.next; lru_delete(old); free(old->data); size += old->len; old->data = 0; old->len = 0; } // allocate new space h->data = (Qfloat *)realloc(h->data,sizeof(Qfloat)*len); size -= more; swap(h->len,len); } lru_insert(h); *data = h->data; return len; } void Cache::swap_index(int i, int j) { if(i==j) return; if(head[i].len) lru_delete(&head[i]); if(head[j].len) lru_delete(&head[j]); 
swap(head[i].data,head[j].data); swap(head[i].len,head[j].len); if(head[i].len) lru_insert(&head[i]); if(head[j].len) lru_insert(&head[j]); if(i>j) swap(i,j); for(head_t *h = lru_head.next; h!=&lru_head; h=h->next) { if(h->len > i) { if(h->len > j) swap(h->data[i],h->data[j]); else { // give up lru_delete(h); free(h->data); size += h->len; h->data = 0; h->len = 0; } } } } // // Kernel evaluation // // the static method k_function is for doing single kernel evaluation // the constructor of Kernel prepares to calculate the l*l kernel matrix // the member function get_Q is for getting one column from the Q Matrix // class QMatrix { public: virtual Qfloat *get_Q(int column, int len) const = 0; virtual double *get_QD() const = 0; virtual void swap_index(int i, int j) const = 0; virtual ~QMatrix() {} }; class Kernel: public QMatrix{ public: Kernel(int l, svm_node * const * x, const svm_parameter& param); virtual ~Kernel(); static double k_function(const svm_node *x, const svm_node *y, const svm_parameter& param); virtual Qfloat *get_Q(int column, int len) const = 0; virtual double *get_QD() const = 0; virtual void swap_index(int i, int j) const // no so const... 
{ swap(x[i],x[j]); if(x_square) swap(x_square[i],x_square[j]); } protected: double (Kernel::*kernel_function)(int i, int j) const; private: const svm_node **x; double *x_square; // svm_parameter const int kernel_type; const int degree; const double gamma; const double coef0; const double lim; const double *K; const int m; static double dot(const svm_node *px, const svm_node *py); static double anova(const svm_node *px, const svm_node *py, const double sigma, const int degree); double kernel_linear(int i, int j) const { return dot(x[i],x[j]); } double kernel_poly(int i, int j) const { return powi(gamma*dot(x[i],x[j])+coef0,degree); } double kernel_rbf(int i, int j) const { return exp(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j]))); } double kernel_sigmoid(int i, int j) const { return tanh(gamma*dot(x[i],x[j])+coef0); } double kernel_laplace(int i, int j) const { return exp(-gamma*sqrt(fabs(x_square[i]+x_square[j]-2*dot(x[i],x[j])))); } double kernel_bessel(int i, int j) const { double bkt = gamma*sqrt(fabs(x_square[i]+x_square[j]-2*dot(x[i],x[j]))); if (bkt < 0.000001){ return 1 ; } else { return(powi(((jn((int)degree, bkt)/powi(bkt,((int)degree)))/lim),(int) coef0)); } } double kernel_anova(int i, int j) const { return anova(x[i], x[j], gamma, degree); } double kernel_spline(int i, int j) const { double result=1.0; double min; double t1,t4; const svm_node *px = x[i], *py= x[j]; // px = x[i]; // py = x[j]; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { min=((px->valuevalue)?px->value:py->value); t1 = (px->value * py->value); t4 = min*min; result*=( 1.0 + t1 + (t1*min) ) - ( ((px->value+py->value)/2.0) * t4) + ((t4 * min)/3.0); } ++px; ++py; } return result; } double kernel_R(int i, int j) const { return *(K + m*i +j); } }; Kernel::Kernel(int l, svm_node * const * x_, const svm_parameter& param) :kernel_type(param.kernel_type), degree(param.degree), gamma(param.gamma), coef0(param.coef0), lim(param.lim), K(param.K), m(param.m) { 
switch(kernel_type) { case LINEAR: kernel_function = &Kernel::kernel_linear; break; case POLY: kernel_function = &Kernel::kernel_poly; break; case RBF: kernel_function = &Kernel::kernel_rbf; break; case SIGMOID: kernel_function = &Kernel::kernel_sigmoid; break; case LAPLACE: kernel_function = &Kernel::kernel_laplace; break; case BESSEL: kernel_function = &Kernel::kernel_bessel; break; case ANOVA: kernel_function = &Kernel::kernel_anova; break; case SPLINE: kernel_function = &Kernel::kernel_spline; break; case R: kernel_function = &Kernel::kernel_R; break; } clone(x,x_,l); if(kernel_type == RBF || kernel_type == LAPLACE || kernel_type == BESSEL) { x_square = new double[l]; for(int i=0;iindex != -1 && py->index != -1) { if(px->index == py->index) { sum += px->value * py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum; } double Kernel::anova(const svm_node *px, const svm_node *py, const double sigma, const int degree) { double sum = 0; double tv; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { tv = (px->value - py->value) * (px->value - py->value); sum += exp( - sigma * tv); ++px; ++py; } else { if(px->index > py->index) { sum += exp( - sigma * (py->value * py->value)); ++py; } else { sum += exp( - sigma * (px->value * px->value)); ++px; } } } return (powi(sum,degree)); } double Kernel::k_function(const svm_node *x, const svm_node *y, const svm_parameter& param) { switch(param.kernel_type) { case LINEAR: return dot(x,y); case POLY: return powi(param.gamma*dot(x,y)+param.coef0,param.degree); case RBF: { double sum = 0; while(x->index != -1 && y->index !=-1) { if(x->index == y->index) { double d = x->value - y->value; sum += d*d; ++x; ++y; } else { if(x->index > y->index) { sum += y->value * y->value; ++y; } else { sum += x->value * x->value; ++x; } } } while(x->index != -1) { sum += x->value * x->value; ++x; } while(y->index != -1) { sum += y->value * y->value; ++y; } return exp(-param.gamma*sum); } case 
SIGMOID: return tanh(param.gamma*dot(x,y)+param.coef0); default: return 0; /* Unreachable */ } } // Generalized SMO+SVMlight algorithm // Solves: // // min 0.5(\alpha^T Q \alpha) + p^T \alpha // // y^T \alpha = \delta // y_i = +1 or -1 // 0 <= alpha_i <= Cp for y_i = 1 // 0 <= alpha_i <= Cn for y_i = -1 // // Given: // // Q, p, y, Cp, Cn, and an initial feasible point \alpha // l is the size of vectors and matrices // eps is the stopping criterion // // solution will be put in \alpha, objective value will be put in obj // class Solver { public: Solver() {}; virtual ~Solver() {}; struct SolutionInfo { double obj; double rho; double upper_bound_p; double upper_bound_n; double r; // for Solver_NU }; void Solve(int l, const QMatrix& Q, const double *p_, const schar *y_, double *alpha_, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking); protected: int active_size; schar *y; double *G; // gradient of objective function enum { LOWER_BOUND, UPPER_BOUND, FREE }; char *alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE double *alpha; const QMatrix *Q; const double *QD; double eps; double Cp,Cn; double *p; int *active_set; double *G_bar; // gradient, if we treat free variables as 0 int l; bool unshrink; // XXX double get_C(int i) { return (y[i] > 0)? 
Cp : Cn; } void update_alpha_status(int i) { if(alpha[i] >= get_C(i)) alpha_status[i] = UPPER_BOUND; else if(alpha[i] <= 0) alpha_status[i] = LOWER_BOUND; else alpha_status[i] = FREE; } bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; } bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; } bool is_free(int i) { return alpha_status[i] == FREE; } void swap_index(int i, int j); void reconstruct_gradient(); virtual int select_working_set(int &i, int &j); virtual double calculate_rho(); virtual void do_shrinking(); private: bool be_shrunk(int i, double Gmax1, double Gmax2); }; void Solver::swap_index(int i, int j) { Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(p[i],p[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]); } void Solver::reconstruct_gradient() { // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i,j; int nr_free = 0; for(j=active_size;j 2*active_size*(l-active_size)) { for(i=active_size;iget_Q(i,active_size); for(j=0;jget_Q(i,l); double alpha_i = alpha[i]; for(j=active_size;jl = l; this->Q = &Q; QD=Q.get_QD(); clone(p, p_,l); clone(y, y_,l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; unshrink = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;iINT_MAX/100 ? 
INT_MAX : 100*l); int counter = min(l,1000)+1; while(iter < max_iter) { // show progress and do shrinking if(--counter == 0) { counter = min(l,1000); if(shrinking) do_shrinking(); } int i,j; if(select_working_set(i,j)!=0) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; if(select_working_set(i,j)!=0) break; else counter = 1; // do shrinking next iteration } ++iter; // update alpha[i] and alpha[j], handle bounds carefully const Qfloat *Q_i = Q.get_Q(i,active_size); const Qfloat *Q_j = Q.get_Q(j,active_size); double C_i = get_C(i); double C_j = get_C(j); double old_alpha_i = alpha[i]; double old_alpha_j = alpha[j]; if(y[i]!=y[j]) { double quad_coef = QD[i]+QD[j]+2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; double delta = (-G[i]-G[j])/quad_coef; double diff = alpha[i] - alpha[j]; alpha[i] += delta; alpha[j] += delta; if(diff > 0) { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = diff; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = -diff; } } if(diff > C_i - C_j) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = C_i - diff; } } else { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = C_j + diff; } } } else { double quad_coef = QD[i]+QD[j]-2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; double delta = (G[i]-G[j])/quad_coef; double sum = alpha[i] + alpha[j]; alpha[i] -= delta; alpha[j] += delta; if(sum > C_i) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = sum - C_i; } } else { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = sum; } } if(sum > C_j) { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = sum - C_j; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = sum; } } } // update G double delta_alpha_i = alpha[i] - old_alpha_i; double delta_alpha_j = alpha[j] - old_alpha_j; for(int k=0;k= max_iter) { if(active_size < l) { // reconstruct the whole gradient to calculate objective value reconstruct_gradient(); active_size = l; } } // calculate rho si->rho = calculate_rho(); // calculate objective value { 
double v = 0; int i; for(i=0;iobj = v/2; } // put back the solution { for(int i=0;iupper_bound_p = Cp; si->upper_bound_n = Cn; delete[] p; delete[] y; delete[] alpha; delete[] alpha_status; delete[] active_set; delete[] G; delete[] G_bar; } // return 1 if already optimal, return 0 otherwise int Solver::select_working_set(int &out_i, int &out_j) { // return i,j such that // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha) // j: minimizes the decrease of obj value // (if quadratic coefficeint <= 0, replace it with tau) // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha) double Gmax = -INF; double Gmax2 = -INF; int Gmax_idx = -1; int Gmin_idx = -1; double obj_diff_min = INF; for(int t=0;t= Gmax) { Gmax = -G[t]; Gmax_idx = t; } } else { if(!is_lower_bound(t)) if(G[t] >= Gmax) { Gmax = G[t]; Gmax_idx = t; } } int i = Gmax_idx; const Qfloat *Q_i = NULL; if(i != -1) // NULL Q_i not accessed: Gmax=-INF if i=-1 Q_i = Q->get_Q(i,active_size); for(int j=0;j= Gmax2) Gmax2 = G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[i]+QD[j]-2.0*y[i]*Q_i[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } else { if (!is_upper_bound(j)) { double grad_diff= Gmax-G[j]; if (-G[j] >= Gmax2) Gmax2 = -G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[i]+QD[j]+2.0*y[i]*Q_i[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } } if(Gmax+Gmax2 < eps) return 1; out_i = Gmax_idx; out_j = Gmin_idx; return 0; } bool Solver::be_shrunk(int i, double Gmax1, double Gmax2) { if(is_upper_bound(i)) { if(y[i]==+1) return(-G[i] > Gmax1); else return(-G[i] > Gmax2); } else if(is_lower_bound(i)) { if(y[i]==+1) return(G[i] > Gmax2); else return(G[i] > Gmax1); } else return(false); } void Solver::do_shrinking() 
{ int i; double Gmax1 = -INF; // max { -y_i * grad(f)_i | i in I_up(\alpha) } double Gmax2 = -INF; // max { y_i * grad(f)_i | i in I_low(\alpha) } // find maximal violating pair first for(i=0;i= Gmax1) Gmax1 = -G[i]; } if(!is_lower_bound(i)) { if(G[i] >= Gmax2) Gmax2 = G[i]; } } else { if(!is_upper_bound(i)) { if(-G[i] >= Gmax2) Gmax2 = -G[i]; } if(!is_lower_bound(i)) { if(G[i] >= Gmax1) Gmax1 = G[i]; } } } if(unshrink == false && Gmax1 + Gmax2 <= eps*10) { unshrink = true; reconstruct_gradient(); active_size = l; } for(i=0;i i) { if (!be_shrunk(active_size, Gmax1, Gmax2)) { swap_index(i,active_size); break; } active_size--; } } } double Solver::calculate_rho() { double r; int nr_free = 0; double ub = INF, lb = -INF, sum_free = 0; for(int i=0;i0) r = sum_free/nr_free; else r = (ub+lb)/2; return r; } // // Solver for nu-svm classification and regression // // additional constraint: e^T \alpha = constant // class Solver_NU: public Solver { public: Solver_NU() {} void Solve(int l, const QMatrix& Q, const double *p, const schar *y, double *alpha, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking) { this->si = si; Solver::Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking); } private: SolutionInfo *si; int select_working_set(int &i, int &j); double calculate_rho(); bool be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4); void do_shrinking(); }; // return 1 if already optimal, return 0 otherwise int Solver_NU::select_working_set(int &out_i, int &out_j) { // return i,j such that y_i = y_j and // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha) // j: minimizes the decrease of obj value // (if quadratic coefficeint <= 0, replace it with tau) // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha) double Gmaxp = -INF; double Gmaxp2 = -INF; int Gmaxp_idx = -1; double Gmaxn = -INF; double Gmaxn2 = -INF; int Gmaxn_idx = -1; int Gmin_idx = -1; double obj_diff_min = INF; for(int t=0;t= Gmaxp) { Gmaxp = -G[t]; Gmaxp_idx = t; } } else { 
if(!is_lower_bound(t)) if(G[t] >= Gmaxn) { Gmaxn = G[t]; Gmaxn_idx = t; } } int ip = Gmaxp_idx; int in = Gmaxn_idx; const Qfloat *Q_ip = NULL; const Qfloat *Q_in = NULL; if(ip != -1) // NULL Q_ip not accessed: Gmaxp=-INF if ip=-1 Q_ip = Q->get_Q(ip,active_size); if(in != -1) Q_in = Q->get_Q(in,active_size); for(int j=0;j= Gmaxp2) Gmaxp2 = G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[ip]+QD[j]-2*Q_ip[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } else { if (!is_upper_bound(j)) { double grad_diff=Gmaxn-G[j]; if (-G[j] >= Gmaxn2) Gmaxn2 = -G[j]; if (grad_diff > 0) { double obj_diff; double quad_coef = QD[in]+QD[j]-2*Q_in[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } } if(max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps) return 1; if (y[Gmin_idx] == +1) out_i = Gmaxp_idx; else out_i = Gmaxn_idx; out_j = Gmin_idx; return 0; } bool Solver_NU::be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4) { if(is_upper_bound(i)) { if(y[i]==+1) return(-G[i] > Gmax1); else return(-G[i] > Gmax4); } else if(is_lower_bound(i)) { if(y[i]==+1) return(G[i] > Gmax2); else return(G[i] > Gmax3); } else return(false); } void Solver_NU::do_shrinking() { double Gmax1 = -INF; // max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) } double Gmax2 = -INF; // max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) } double Gmax3 = -INF; // max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) } double Gmax4 = -INF; // max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) } // find maximal violating pair first int i; for(i=0;i Gmax1) Gmax1 = -G[i]; } else if(-G[i] > Gmax4) Gmax4 = -G[i]; } if(!is_lower_bound(i)) { if(y[i]==+1) { if(G[i] > Gmax2) Gmax2 = G[i]; } else if(G[i] > Gmax3) Gmax3 = G[i]; 
} } if(unshrink == false && max(Gmax1+Gmax2,Gmax3+Gmax4) <= eps*10) { unshrink = true; reconstruct_gradient(); active_size = l; } for(i=0;i i) { if (!be_shrunk(active_size, Gmax1, Gmax2, Gmax3, Gmax4)) { swap_index(i,active_size); break; } active_size--; } } } double Solver_NU::calculate_rho() { int nr_free1 = 0,nr_free2 = 0; double ub1 = INF, ub2 = INF; double lb1 = -INF, lb2 = -INF; double sum_free1 = 0, sum_free2 = 0; for(int i=0;i 0) r1 = sum_free1/nr_free1; else r1 = (ub1+lb1)/2; if(nr_free2 > 0) r2 = sum_free2/nr_free2; else r2 = (ub2+lb2)/2; si->r = (r1+r2)/2; return (r1-r2)/2; } /////////////////// BSVM code class Solver_SPOC { public: Solver_SPOC() {}; ~Solver_SPOC() {}; void Solve(int l, const Kernel& Q, double *alpha_, short *y_, double *C_, double eps, int shrinking, int nr_class); private: int active_size; double *G; // gradient of objective function short *y; bool *alpha_status; // free:true, bound:false double *alpha; const Kernel *Q; double eps; double *C; int *active_set; int l, nr_class; bool unshrinked; double get_C(int i, int m) { if (y[i] == m) return C[m]; return 0; } void update_alpha_status(int i, int m) { if(alpha[i*nr_class+m] >= get_C(i, m)) alpha_status[i*nr_class+m] = false; else alpha_status[i*nr_class+m] = true; } void swap_index(int i, int j); double select_working_set(int &q); void solve_sub_problem(double A, double *B, double C, double *nu); void reconstruct_gradient(); void do_shrinking(); }; void Solver_SPOC::swap_index(int i, int j) { Q->swap_index(i, j); swap(y[i], y[j]); swap(active_set[i], active_set[j]); for (int m=0;mget_Q(i,l); double alpha_i_m = alpha[i*nr_class+m]; for (int j=active_size;jl = l; this->nr_class = nr_class; this->Q = &Q; clone(y,y_,l); clone(alpha,alpha_,l*nr_class); C = C_; this->eps = eps; unshrinked = false; int i, m, q, old_q = -1; // initialize alpha_status { alpha_status = new bool[l*nr_class]; for(i=0;i 0) solve_sub_problem(A, B, C[y[q]], nu); else { i = 0; for (m=1;m B[i]) i = m; nu[i] = -C[y[q]]; 
} nu[y[q]] += C[y[q]]; for (m=0;m 1e-12) #endif { alpha[q*nr_class+m] = nu[m]; update_alpha_status(q, m); for (i=0;i 0) nSV++; } //info("\noptimization finished, #iter = %d, obj = %lf\n",iter, obj); // info("nSV = %d, nFREE = %d\n",nSV,nFREE); // put back the solution { for(int i=0;i vio_q) { q = i; vio_q = lb - ub; } } return vio_q; } void Solver_SPOC::do_shrinking() { int i, m; double Gm = select_working_set(i); if (Gm < eps) return; // shrink for (i=0;i= th) goto out; for (m++;m= th) goto out; --active_size; swap_index(i, active_size); --i; out: ; } // unshrink, check all variables again before final iterations if (unshrinked || Gm > 10*eps) return; unshrinked = true; reconstruct_gradient(); for (i=l-1;i>=active_size;i--) { double *G_i = &G[i*nr_class]; double th = G_i[y[i]] - Gm/2; for (m=0;m= th) goto out1; for (m++;m= th) goto out1; swap_index(i, active_size); ++active_size; ++i; out1: ; } } int compar(const void *a, const void *b) { if (*(double *)a > *(double *)b) return -1; else if (*(double *)a < *(double *)b) return 1; return 0; } void Solver_SPOC::solve_sub_problem(double A, double *B, double C, double *nu) { int r; double *D; clone(D, B, nr_class+1); qsort(D, nr_class, sizeof(double), compar); D[nr_class] = -INF; double phi = D[0] - A*C; for (r=0;phi<(r+1)*D[r+1];r++) phi += D[r+1]; delete[] D; phi /= (r+1); for (r=0;r 0)? 
Cp : Cn; } void update_alpha_status(int i) { if(alpha[i] >= get_C(i)) alpha_status[i] = UPPER_BOUND; else if(alpha[i] <= 0) alpha_status[i] = LOWER_BOUND; else alpha_status[i] = FREE; } bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; } bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; } bool is_free(int i) { return alpha_status[i] == FREE; } virtual void swap_index(int i, int j); virtual void reconstruct_gradient(); virtual void shrink_one(int k); virtual void unshrink_one(int k); double select_working_set(int &q); void do_shrinking(); private: double Cp, Cn; double *b; schar *y; }; void Solver_B::swap_index(int i, int j) { Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(b[i],b[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]); } void Solver_B::reconstruct_gradient() { // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i; for(i=active_size;iget_Q(i,l); double alpha_i = alpha[i]; for(int j=active_size;jl = l; this->Q = &Q; b = b_; clone(y, y_, l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; this->qpsize = qpsize; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;i1e-12) { alpha[working_set[i]] = qp.x[i]; Qfloat *QB_i = QB[i]; for(j=0;jobj = v/2; } // juggle everything back /*{ for(int i=0;iupper_bound = new double[2]; si->upper_bound[0] = Cp; si->upper_bound[1] = Cn; // info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;i= positive_max[j]) break; positive_max[j-1] = positive_max[j]; positive_set[j-1] = positive_set[j]; } positive_max[j-1] = v; positive_set[j-1] = i; } } for (i=0;i0) continue; } if (v > positive_max[0]) { for (j=1;j= -Gm) continue; } else continue; --active_size; shrink_one(k); --k; // look at the newcomer } // unshrink, check all variables again before final iterations if 
(unshrinked || Gm > eps*10) return; unshrinked = true; reconstruct_gradient(); for(k=l-1;k>=active_size;k--) { if (is_lower_bound(k)) { if (G[k] > Gm) continue; } else if (is_upper_bound(k)) { if (G[k] < -Gm) continue; } else continue; unshrink_one(k); active_size++; ++k; // look at the newcomer } } class Solver_B_linear : public Solver_B { public: Solver_B_linear() {}; ~Solver_B_linear() {}; int Solve(int l, svm_node * const * x_, double *b_, schar *y_, double *alpha_, double *w, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking, int qpsize); private: double get_C(int i) { return (y[i] > 0)? Cp : Cn; } void swap_index(int i, int j); void reconstruct_gradient(); double dot(int i, int j); double Cp, Cn; double *b; schar *y; double *w; const svm_node **x; }; double Solver_B_linear::dot(int i, int j) { const svm_node *px = x[i], *py = x[j]; double sum = 0; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { sum += px->value * py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum; } void Solver_B_linear::swap_index(int i, int j) { swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(b[i],b[j]); swap(active_set[i],active_set[j]); swap(x[i], x[j]); } void Solver_B_linear::reconstruct_gradient() { int i; for(i=active_size;iindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[i] = y[i]*sum + b[i]; } } int Solver_B_linear::Solve(int l, svm_node * const * x_, double *b_, schar *y_, double *alpha_, double *w, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking, int qpsize) { this->l = l; clone(x, x_, l); clone(b, b_, l); clone(y, y_, l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; this->qpsize = qpsize; this->w = w; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;iindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[i] += y[i]*sum; } } // optimization 
step int iter = 0; int counter = min(l*2/qpsize,2000/qpsize)+1; while(1) { // show progress and do shrinking if(--counter == 0) { counter = min(l*2/qpsize, 2000/qpsize); if(shrinking) do_shrinking(); // info("."); } int i,j,q; if (select_working_set(q) < eps) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; // info("*");info_flush(); if (select_working_set(q) < eps) break; else counter = 1; // do shrinking next iteration } if (counter == min(l*2/qpsize, 2000/qpsize)) { bool same = true; for (i=0;i1e-12) { alpha[Bi] = qp.x[i]; update_alpha_status(Bi); double yalpha = y[Bi]*d; for (const svm_node *px = x[Bi];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } for(j=0;jindex != -1;px++) sum += w[px->index]*px->value; sum += w[0]; G[j] = y[j]*sum + b[j]; } } // calculate objective value { double v = 0; int i; for(i=0;iobj = v/2; } // juggle everything back /*{ for(int i=0;iupper_bound = new double[2]; si->upper_bound[0] = Cp; si->upper_bound[1] = Cn; // info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;iget_Q(real_i[i],real_l); double alpha_i = alpha[i], t; int y_i = y[i], yy_i = yy[i], ub, k; t = 2*alpha_i; ub = start2[yy_i*nr_class+y_i+1]; for (j=start2[yy_i*nr_class+y_i];jl = l; this->nr_class = nr_class; this->real_l = l/(nr_class - 1); this->Q = &Q; this->lin = lin; clone(y,y_,l); clone(alpha,alpha_,l); C = C_; this->eps = eps; this->qpsize = qpsize; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;i 1e-12) { alpha[Bi] = qp.x[i]; Qfloat *QB_i = QB[i]; int y_Bi = y[Bi], yy_Bi = yy[Bi], ub, k; double t = 2*d; ub = start1[yy_Bi*nr_class+y_Bi+1]; for (j=start1[yy_Bi*nr_class+y_Bi];jobj = v/4; } clone(si->upper_bound,C,nr_class); //info("\noptimization finished, #iter = %d\n",iter); // put back the solution { for(int i=0;i0;i--) swap_index(start2[i], start2[i-1]); t = s + 1; for 
(i=nr_class*nr_class;i>t;i--) swap_index(start1[i], start1[i-1]); t = nr_class*nr_class; for (i=s+1;i<=t;i++) start1[i]++; for (i=0;i<=s;i++) start2[i]++; } // // Q matrices for various formulations // class BSVC_Q: public Kernel { public: BSVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_) :Kernel(prob.l, prob.x, param) { clone(y,y_,prob.l); cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j) + 1); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(y[i],y[j]); } ~BSVC_Q() { delete[] y; delete cache; delete[] QD; } private: schar *y; Cache *cache; double *QD; }; class BONE_CLASS_Q: public Kernel { public: BONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { cache = new Cache(prob.l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j) + 1; } return data; } double *get_QD() const { return QD; } ~BONE_CLASS_Q() { delete cache; delete[] QD; } private: Cache *cache; double *QD; }; class BSVR_Q: public Kernel { public: BSVR_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { l = prob.l; cache = new Cache(l,(int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[1]; QD[0] = 1; sign = new schar[2*l]; index = new int[2*l]; for(int k=0;kget_data(real_i,&data,l) < l) { for(int j=0;j*kernel_function)(real_i,j) + 1; } // reorder and copy Qfloat *buf = buffer[next_buffer]; next_buffer = (next_buffer+1)%q; schar si = sign[i]; for(int j=0;j*kernel_function)(i,i); } Qfloat *get_Q(int i, int len) 
const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j)); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(y[i],y[j]); swap(QD[i],QD[j]); } ~SVC_Q() { delete[] y; delete cache; delete[] QD; } private: schar *y; Cache *cache; double *QD; }; class ONE_CLASS_Q: public Kernel { public: ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[prob.l]; for(int i=0;i*kernel_function)(i,i); } Qfloat *get_Q(int i, int len) const { Qfloat *data; int start; if((start = cache->get_data(i,&data,len)) < len) { for(int j=start;j*kernel_function)(i,j); } return data; } double *get_QD() const { return QD; } void swap_index(int i, int j) const { cache->swap_index(i,j); Kernel::swap_index(i,j); swap(QD[i],QD[j]); } ~ONE_CLASS_Q() { delete cache; delete[] QD; } private: Cache *cache; double *QD; }; class SVR_Q: public Kernel { public: SVR_Q(const svm_problem& prob, const svm_parameter& param) :Kernel(prob.l, prob.x, param) { l = prob.l; cache = new Cache(l,(long int)(param.cache_size*(1<<20)),param.qpsize); QD = new double[2*l]; sign = new schar[2*l]; index = new int[2*l]; for(int k=0;k*kernel_function)(k,k); QD[k+l]=QD[k]; } buffer[0] = new Qfloat[2*l]; buffer[1] = new Qfloat[2*l]; next_buffer = 0; } void swap_index(int i, int j) const { swap(sign[i],sign[j]); swap(index[i],index[j]); swap(QD[i],QD[j]); } Qfloat *get_Q(int i, int len) const { Qfloat *data; int real_i = index[i]; if(cache->get_data(real_i,&data,l) < l) { for(int j=0;j*kernel_function)(real_i,j); } // reorder and copy Qfloat *buf = buffer[next_buffer]; next_buffer = 1 - next_buffer; schar si = sign[i]; for(int j=0;jsvm_type; if(svm_type != C_BSVC && svm_type != EPSILON_BSVR && svm_type != KBB && svm_type != SPOC) return 
"unknown svm type"; // kernel_type int kernel_type = param->kernel_type; if(kernel_type != LINEAR && kernel_type != POLY && kernel_type != RBF && kernel_type != SIGMOID && kernel_type != R && kernel_type != LAPLACE&& kernel_type != BESSEL&& kernel_type != ANOVA) return "unknown kernel type"; // cache_size,eps,C,nu,p,shrinking if(kernel_type != LINEAR) if(param->cache_size <= 0) return "cache_size <= 0"; if(param->eps <= 0) return "eps <= 0"; if(param->C <= 0) return "C <= 0"; if(svm_type == EPSILON_BSVR) if(param->p < 0) return "p < 0"; if(param->shrinking != 0 && param->shrinking != 1) return "shrinking != 0 and shrinking != 1"; if(svm_type == C_BSVC || svm_type == KBB || svm_type == SPOC) if(param->qpsize < 2) return "qpsize < 2"; if(kernel_type == LINEAR) if (param->Cbegin <= 0) return "Cbegin <= 0"; if(kernel_type == LINEAR) if (param->Cstep <= 1) return "Cstep <= 1"; return NULL; } const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *param) { // svm_type int svm_type = param->svm_type; if(svm_type != C_SVC && svm_type != NU_SVC && svm_type != ONE_CLASS && svm_type != EPSILON_SVR && svm_type != NU_SVR) return "unknown svm type"; // kernel_type int kernel_type = param->kernel_type; if(kernel_type != LINEAR && kernel_type != POLY && kernel_type != RBF && kernel_type != SIGMOID && kernel_type != R && kernel_type != LAPLACE&& kernel_type != BESSEL&& kernel_type != ANOVA&& kernel_type != SPLINE) return "unknown kernel type"; // cache_size,eps,C,nu,p,shrinking if(param->cache_size <= 0) return "cache_size <= 0"; if(param->eps <= 0) return "eps <= 0"; if(svm_type == C_SVC || svm_type == EPSILON_SVR || svm_type == NU_SVR) if(param->C <= 0) return "C <= 0"; if(svm_type == NU_SVC || svm_type == ONE_CLASS || svm_type == NU_SVR) if(param->nu < 0 || param->nu > 1) return "nu < 0 or nu > 1"; if(svm_type == EPSILON_SVR) if(param->p < 0) return "p < 0"; if(param->shrinking != 0 && param->shrinking != 1) return "shrinking != 0 and shrinking != 1"; // 
check whether nu-svc is feasible if(svm_type == NU_SVC) { int l = prob->l; int max_nr_class = 16; int nr_class = 0; int *label = Malloc(int,max_nr_class); int *count = Malloc(int,max_nr_class); int i; for(i=0;iy[i]; int j; for(j=0;jnu*(n1+n2)/2 > min(n1,n2)) { free(label); free(count); return "specified nu is infeasible"; } } } } return NULL; } #include #include #include extern "C" { struct svm_node ** sparsify (double *x, int r, int c) { struct svm_node** sparse; int i, ii, count; sparse = (struct svm_node **) malloc (r * sizeof(struct svm_node *)); for (i = 0; i < r; i++) { /* determine nr. of non-zero elements */ for (count = ii = 0; ii < c; ii++) if (x[i * c + ii] != 0) count++; /* allocate memory for column elements */ sparse[i] = (struct svm_node *) malloc ((count + 1) * sizeof(struct svm_node)); /* set column elements */ for (count = ii = 0; ii < c; ii++) if (x[i * c + ii] != 0) { sparse[i][count].index = ii; sparse[i][count].value = x[i * c + ii]; count++; } /* set termination element */ sparse[i][count].index = -1; } return sparse; } struct svm_node ** transsparse (double *x, int r, int *rowindex, int *colindex) { struct svm_node** sparse; int i, ii, count = 0, nnz = 0; sparse = (struct svm_node **) malloc (r * sizeof(struct svm_node*)); for (i = 0; i < r; i++) { /* allocate memory for column elements */ nnz = rowindex[i+1] - rowindex[i]; sparse[i] = (struct svm_node *) malloc ((nnz + 1) * sizeof(struct svm_node)); /* set column elements */ for (ii = 0; ii < nnz; ii++) { sparse[i][ii].index = colindex[count]; sparse[i][ii].value = x[count]; count++; } /* set termination element */ sparse[i][ii].index = -1; } return sparse; } void tron_run(const svm_problem *prob, const svm_parameter* param, double *alpha, double *weighted_C, Solver_B::SolutionInfo* sii, int nr_class, int *count) { int l = prob->l; int i; double Cp = param->C; double Cn = param->C; if(param->nr_weight > 0) { Cp = param->C*param->weight[0]; Cn = param->C*param->weight[1]; } 
switch(param->svm_type) { case C_BSVC: { // double *alpha = new double[l]; double *minus_ones = new double[l]; schar *y = new schar[l]; for(i=0;iy[i] > 0) y[i] = +1; else y[i]=-1; } if (param->kernel_type == LINEAR) { double *w = new double[prob->n+1]; for (i=0;i<=prob->n;i++) w[i] = 0; Solver_B_linear s; int totaliter = 0; double Cpj = param->Cbegin, Cnj = param->Cbegin*Cn/Cp; while (Cpj < Cp) { totaliter += s.Solve(l, prob->x, minus_ones, y, alpha, w, Cpj, Cnj, param->eps, sii, param->shrinking, param->qpsize); if (Cpj*param->Cstep >= Cp) { for (i=0;i<=prob->n;i++) w[i] = 0; for (i=0;i= Cpj) alpha[i] = Cp; else if (y[i] == -1 && alpha[i] >= Cnj) alpha[i] = Cn; else alpha[i] *= Cp/Cpj; double yalpha = y[i]*alpha[i]; for (const svm_node *px = prob->x[i];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } else { for (i=0;iCstep; for (i=0;i<=prob->n;i++) w[i] *= param->Cstep; } Cpj *= param->Cstep; Cnj *= param->Cstep; } totaliter += s.Solve(l, prob->x, minus_ones, y, alpha, w, Cp, Cn, param->eps, sii, param->shrinking, param->qpsize); //info("\noptimization finished, #iter = %d\n",totaliter); delete[] w; } else { Solver_B s; s.Solve(l, BSVC_Q(*prob,*param,y), minus_ones, y, alpha, Cp, Cn, param->eps, sii, param->shrinking, param->qpsize); } // double sum_alpha=0; // for(i=0;iC*prob->l)); // for(i=0;ip - prob->y[i]; y[i] = 1; alpha2[i+l] = 0; linear_term[i+l] = param->p + prob->y[i]; y[i+l] = -1; } if (param->kernel_type == LINEAR) { double *w = new double[prob->n+1]; for (i=0;i<=prob->n;i++) w[i] = 0; struct svm_node **x = new svm_node*[2*l]; for (i=0;ix[i]; Solver_B_linear s; int totaliter = 0; double Cj = param->Cbegin; while (Cj < param->C) { totaliter += s.Solve(2*l, x, linear_term, y, alpha, w, Cj, Cj, param->eps, sii, param->shrinking, param->qpsize); if (Cj*param->Cstep >= param->C) { for (i=0;i<=prob->n;i++) w[i] = 0; for (i=0;i<2*l;i++) { if (alpha[i] >= Cj) alpha[i] = param->C; else alpha[i] *= param->C/Cj; double yalpha = 
y[i]*alpha[i]; for (const svm_node *px = x[i];px->index != -1;px++) w[px->index] += yalpha*px->value; w[0] += yalpha; } } else { for (i=0;i<2*l;i++) alpha[i] *= param->Cstep; for (i=0;i<=prob->n;i++) w[i] *= param->Cstep; } Cj *= param->Cstep; } totaliter += s.Solve(2*l, x, linear_term, y, alpha2, w, param->C, param->C, param->eps, sii, param->shrinking, param->qpsize); //info("\noptimization finished, #iter = %d\n",totaliter); } else { Solver_B s; s.Solve(2*l, BSVR_Q(*prob,*param), linear_term, y, alpha2, param->C, param->C, param->eps, sii, param->shrinking, param->qpsize); } double sum_alpha = 0; for(i=0;iC*l)); delete[] y; delete[] alpha2; delete[] linear_term; } break; case KBB: { Solver_B::SolutionInfo si; int i=0 , j=0 ,k=0 , ll = l*(nr_class - 1); double *alpha2 = Malloc(double, ll); short *y = new short[ll]; for (i=0;iy[q]; else q += count[j]; } Solver_MB s; s.Solve(ll, BONE_CLASS_Q(*prob,*param), -2, alpha2, y, weighted_C, 2*param->eps, &si, param->shrinking, param->qpsize, nr_class, count); //info("obj = %f, rho = %f\n",si.obj,0.0); int *start = Malloc(int,nr_class); start[0] = 0; for(i=1;iy[i]; } Solver_SPOC s; s.Solve(l, ONE_CLASS_Q(*prob, *param), alpha, y, weighted_C, param->eps, param->shrinking, nr_class); free(weighted_C); delete[] y; } break; } } SEXP tron_optim(SEXP x, SEXP r, SEXP c, SEXP y, SEXP K, SEXP colindex, SEXP rowindex, SEXP sparse, SEXP nclass, SEXP countc, SEXP kernel_type, SEXP svm_type, SEXP cost, SEXP eps, SEXP gamma, SEXP degree, SEXP coef0, SEXP Cbegin, SEXP Cstep, SEXP weightlabels, SEXP weights, SEXP nweights, SEXP weightedc, SEXP cache, SEXP epsilon, SEXP qpsize, SEXP shrinking ) { struct svm_parameter param; struct svm_problem prob; int i ,*count = NULL; double *alpha2 = NULL; SEXP alpha3 = NULL; int nr_class; const char* s; struct Solver_B::SolutionInfo si; param.svm_type = *INTEGER(svm_type); param.kernel_type = *INTEGER(kernel_type); param.degree = *INTEGER(degree); param.gamma = *REAL(gamma); param.coef0 = *REAL(coef0); 
param.cache_size = *REAL(cache); param.eps = *REAL(epsilon); param.C = *REAL(cost); param.Cbegin = *REAL(Cbegin); param.Cstep = *REAL(Cstep); param.K = REAL(K); param.qpsize = *INTEGER(qpsize); nr_class = *INTEGER(nclass); param.nr_weight = *INTEGER(nweights); if (param.nr_weight > 0) { param.weight = (double *) malloc (sizeof(double) * param.nr_weight); memcpy (param.weight, REAL(weights), param.nr_weight * sizeof(double)); param.weight_label = (int *) malloc (sizeof(int) * param.nr_weight); memcpy (param.weight_label, INTEGER(weightlabels), param.nr_weight * sizeof(int)); } param.p = *REAL(eps); param.shrinking = *INTEGER(shrinking); param.lim = 1/(gammafn(param.degree+1)*powi(2,param.degree)); /* set problem */ prob.l = *INTEGER(r); prob.n = *INTEGER(c); prob.y = (double *) malloc (sizeof(double) * prob.l); memcpy(prob.y, REAL(y), prob.l*sizeof(double)); if (*INTEGER(sparse) > 0) prob.x = transsparse(REAL(x), *INTEGER(r), INTEGER(rowindex), INTEGER(colindex)); else prob.x = sparsify(REAL(x), *INTEGER(r), *INTEGER(c)); s = svm_check_parameterb(&prob, ¶m); //if (s) //printf("%s",s); //else { double *weighted_C = Malloc(double, nr_class); memcpy(weighted_C, REAL(weightedc), nr_class*sizeof(double)); if(param.svm_type == 7) { alpha2 = (double *) malloc (sizeof(double) * prob.l*nr_class); } if(param.svm_type == 8) { count = Malloc(int, nr_class); memcpy(count, INTEGER(countc), nr_class*sizeof(int)); alpha2 = (double *) malloc (sizeof(double) * prob.l*(nr_class-1)); } if(param.svm_type == 5||param.svm_type==6) { alpha2 = (double *) malloc (sizeof(double) * prob.l); } tron_run(&prob, ¶m, alpha2, weighted_C , &si, nr_class, count); //} /* clean up memory */ if (param.nr_weight > 0) { free(param.weight); free(param.weight_label); } if(param.svm_type == 7) { PROTECT(alpha3 = allocVector(REALSXP, (nr_class*prob.l + 1))); UNPROTECT(1); for (i = 0; i < prob.l; i++) free (prob.x[i]); for (i = 0; i l; int i; switch(param->svm_type) { case C_SVC: { double Cp,Cn; double 
*minus_ones = new double[l]; schar *y = new schar[l]; for(i=0;iy[i] > 0) y[i] = +1; else y[i]=-1; } if(param->nr_weight > 0) { Cp = C*param->weight[0]; Cn = C*param->weight[1]; } else Cp = Cn = C; Solver s; //have to weight cost parameter for multiclass. problems s.Solve(l, SVC_Q(*prob,*param,y), minus_ones, y, alpha, Cp, Cn, param->eps, si, param->shrinking); delete[] minus_ones; delete[] y; } break; case NU_SVC: { schar *y = new schar[l]; double nu = param->nu; double sum_pos = nu*l/2; double sum_neg = nu*l/2; for(i=0;iy[i]>0) { y[i] = +1; alpha[i] = min(1.0,sum_pos); sum_pos -= alpha[i]; } else { y[i] = -1; alpha[i] = min(1.0,sum_neg); sum_neg -= alpha[i]; } double *zeros = new double[l]; for(i=0;ieps, si, param->shrinking); double r = si->r; //info("C = %f\n",1/r); for(i=0;irho /= r; si->obj /= (r*r); si->upper_bound_p = 1/r; si->upper_bound_n = 1/r; delete[] y; delete[] zeros; } break; case ONE_CLASS: { double *zeros = new double[l]; schar *ones = new schar[l]; int n = (int)(param->nu*l); // # of alpha's at upper bound // set initial alpha probably usefull for smo for(i=0;inu * l - n; for(i=n+1;ieps, si, param->shrinking); delete[] zeros; delete[] ones; } break; case EPSILON_SVR: { double *alpha2 = new double[2*l]; double *linear_term = new double[2*l]; schar *y = new schar[2*l]; for(i=0;ip - prob->y[i]; y[i] = 1; alpha2[i+l] = 0; linear_term[i+l] = param->p + prob->y[i]; y[i+l] = -1; } Solver s; s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y, alpha2, param->C, param->C, param->eps, si, param->shrinking); double sum_alpha = 0; for(i=0;iC*l)); delete[] alpha2; delete[] linear_term; delete[] y; } break; case NU_SVR: { double C = param->C; double *alpha2 = new double[2*l]; double *linear_term = new double[2*l]; schar *y = new schar[2*l]; double sum = C * param->nu * l / 2; for(i=0;iy[i]; y[i] = 1; linear_term[i+l] = prob->y[i]; y[i+l] = -1; } Solver_NU s; s.Solve(2*l, SVR_Q(*prob,*param), linear_term, y, alpha2, C, C, param->eps, si, param->shrinking); 
//info("epsilon = %f\n",-si->r); for(i=0;i 0) { param.weight = (double *) malloc (sizeof(double) * param.nr_weight); memcpy (param.weight, REAL(weights), param.nr_weight * sizeof(double)); param.weight_label = (int *) malloc (sizeof(int) * param.nr_weight); memcpy (param.weight_label, INTEGER(weightlabels), param.nr_weight * sizeof(int)); } param.p = *REAL(eps); param.shrinking = *INTEGER(shrinking); param.lim = 1/(gammafn(param.degree+1)*powi(2,param.degree)); /* set problem */ prob.l = *INTEGER(r); prob.y = REAL(y); prob.n = *INTEGER(c); if (*INTEGER(sparse) > 0) prob.x = transsparse(REAL(x), *INTEGER(r), INTEGER(rowindex), INTEGER(colindex)); else prob.x = sparsify(REAL(x), *INTEGER(r), *INTEGER(c)); double *alpha2 = (double *) malloc (sizeof(double) * prob.l); s = svm_check_parameter(&prob, ¶m); //if (s) { //printf("%s",s); //} //else { solve_smo(&prob, ¶m, alpha2, &si, *REAL(cost), REAL(linear_term)); //} PROTECT(alpha = allocVector(REALSXP, prob.l+2)); /* clean up memory */ if (param.nr_weight > 0) { free(param.weight); free(param.weight_label); } for (i = 0; i < prob.l; i++) {free (prob.x[i]); REAL(alpha)[i] = *(alpha2+i); } free (prob.x); REAL(alpha)[prob.l] = si.rho; REAL(alpha)[prob.l+1] = si.obj; free(alpha2); UNPROTECT(1); return alpha; } } kernlab/src/solvebqp.c0000644000175100001440000000316012774400037014500 0ustar hornikusers#include #include #include /* LEVEL 1 BLAS */ /*extern double ddot_(int *, double *, int *, double *, int *); */ /* LEVEL 2 BLAS */ /*extern int dsymv_(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *);*/ /* MINPACK 2 */ extern void dtron(int, double *, double *, double *, double, double, double, double, int, double); struct BQP { double eps; int n; double *x, *C, *Q, *p; }; int nfev, inc = 1; double one = 1, zero = 0, *A, *g0; int uhes(int n, double *x, double **H) { *H = A; return 0; } int ugrad(int n, double *x, double *g) { /* evaluate the gradient g = A*x + g0 */ memcpy(g, g0, 
sizeof(double)*n); F77_CALL(dsymv)("U", &n, &one, A, &n, x, &inc, &one, g, &inc); return 0; } int ufv(int n, double *x, double *f) { /* evaluate the function value f(x) = 0.5*x'*A*x + g0'*x */ double *t = (double *) malloc(sizeof(double)*n); F77_CALL(dsymv)("U", &n, &one, A, &n, x, &inc, &zero, t, &inc); *f = F77_CALL(ddot)(&n, x, &inc, g0, &inc) + 0.5 * F77_CALL(ddot)(&n, x, &inc, t, &inc); free(t); return ++nfev; } void solvebqp(struct BQP *qp) { /* driver for positive semidefinite quadratic programing version of tron */ int i, n, maxfev; double *x, *xl, *xu; double frtol, fatol, fmin, gtol, cgtol; n = qp->n; maxfev = 1000; /* ? */ nfev = 0; x = qp->x; xu = qp->C; A = qp->Q; g0 = qp->p; xl = (double *) malloc(sizeof(double)*n); for (i=0;ieps; dtron(n, x, xl, xu, gtol, frtol, fatol, fmin, maxfev, cgtol); free(xl); } kernlab/src/stack.h0000644000175100001440000000630512774400037013763 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #ifndef MSUFSORT_STACK_H #define MSUFSORT_STACK_H //============================================================================================= // A quick and dirty stack class for use with the MSufSort algorithm // // Author: M.A. 
Maniscalco // Date: 7/30/04 // email: michael@www.michael-maniscalco.com // // This code is free for non commercial use only. // //============================================================================================= #include "memory.h" template class Stack { public: Stack(unsigned int initialSize, unsigned int maxExpandSize, bool preAllocate = false): m_initialSize(initialSize), m_maxExpandSize(maxExpandSize), m_preAllocate(preAllocate) { Initialize(); } virtual ~Stack(){SetSize(0);} void Push(T value); T & Pop(); T & Top(); void SetSize(unsigned int stackSize); void Initialize(); unsigned int Count(); void Clear(); T * m_stack; T * m_stackPtr; T * m_endOfStack; unsigned int m_stackSize; unsigned int m_initialSize; unsigned int m_maxExpandSize; bool m_preAllocate; }; template inline void Stack::Clear() { m_stackPtr = m_stack; } template inline unsigned int Stack::Count() { return (unsigned int)(m_stackPtr - m_stack); } template inline void Stack::Initialize() { m_stack = m_endOfStack = m_stackPtr = 0; m_stackSize = 0; if (m_preAllocate) SetSize(m_initialSize); } template inline void Stack::Push(T value) { if (m_stackPtr >= m_endOfStack) { unsigned int newSize = (m_stackSize < m_maxExpandSize) ? 
m_stackSize + m_maxExpandSize : (m_stackSize << 1); SetSize(newSize); } *(m_stackPtr++) = value; } template inline T & Stack::Pop() { return *(--m_stackPtr); } template inline T & Stack::Top() { return *(m_stackPtr - 1); } template inline void Stack::SetSize(unsigned int stackSize) { if (m_stackSize == stackSize) return; T * newStack = 0; if (stackSize) { newStack = new T[stackSize]; unsigned int bytesToCopy = (unsigned int)(m_stackPtr - m_stack) * (unsigned int)sizeof(T); if (bytesToCopy) memcpy(newStack, m_stack, bytesToCopy); m_stackPtr = &newStack[m_stackPtr - m_stack]; m_endOfStack = &newStack[stackSize]; m_stackSize = stackSize; } if (m_stack) delete [] m_stack; m_stack = newStack; } #endif kernlab/src/Makevars.win0000644000175100001440000000006012774400037014765 0ustar hornikusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) kernlab/src/dgpstep.c0000644000175100001440000000275112774400037014320 0ustar hornikusersvoid dgpstep(int n, double *x, double *xl, double *xu, double alpha, double *w, double *s) { /* c ********** c c Subroutine dgpstep c c This subroutine computes the gradient projection step c c s = P[x + alpha*w] - x, c c where P is the projection on the n-dimensional interval [xl,xu]. c c parameters: c c n is an integer variable. c On entry n is the number of variables. c On exit n is unchanged. c c x is a double precision array of dimension n. c On entry x specifies the vector x. c On exit x is unchanged. c c xl is a double precision array of dimension n. c On entry xl is the vector of lower bounds. c On exit xl is unchanged. c c xu is a double precision array of dimension n. c On entry xu is the vector of upper bounds. c On exit xu is unchanged. c c alpha is a double precision variable. c On entry alpha specifies the scalar alpha. c On exit alpha is unchanged. c c w is a double precision array of dimension n. c On entry w specifies the vector w. c On exit w is unchanged. c c s is a double precision array of dimension n. 
c On entry s need not be specified. c On exit s contains the gradient projection step. c c ********** */ int i; for (i=0;i xu[i]) s[i] = xu[i] - x[i]; else s[i] = alpha*w[i]; } kernlab/src/brweight.cpp0000644000175100001440000000435112774400037015023 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/BoundedRangeWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef BRWEIGHT_CPP #define BRWEIGHT_CPP #include "brweight.h" #include #define MIN(x,y) (((x) < (y)) ? (x) : (y)) #define MAX(x,y) (((x) > (y)) ? (x) : (y)) /** * Bounded Range weight function. * W(y,t) := max(0,min(tau,n)-gamma) * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. 
* */ ErrorCode BoundedRangeWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. Real tau = (Real)x_len; Real gamma = (Real)floor_len; weight = MAX(0,MIN(tau,n)-gamma); // std::cout << "floor_len:"< * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ExpDecayWeight.cpp // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 #ifndef EXPDECAYWEIGHT_CPP #define EXPDECAYWEIGHT_CPP #include #include #include "expdecayweight.h" using namespace std; /** * Exponential Decay weight function. * W(y,t) := (lambda^{-gamma} - lambda^{-tau}) / (lambda - 1) * * \param floor_len - (IN) Length of floor interval of matched substring. * (cf. gamma in VisSmo02). * \param x_len - (IN) Length of the matched substring. * (cf. tau in visSmo02). * \param weight - (OUT) The weight value. * */ ErrorCode ExpDecayWeight::ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight) // ErrorCode // ExpDecayWeight::ComputeWeight(const Real &floor_len, const Real &x_len, Real &weight) { //' Input validation assert(x_len >= floor_len); //' x_len == floor_len when the substring found ends on an interval. 
if(floor_len == x_len) { //' substring ended on an interval, so, get the val from val[] weight = 0.0; } else { //weight = (pow(-(floor_len-1), lambda) - pow(-x_len, lambda)) / (1-lambda); //weight = (pow(lambda,((Real)floor_len)) - pow(lambda, (Real)x_len+1)) / (1-lambda); // double a=floor_len*-1.0; // double b=x_len*-1.0; // weight = (pow(lambda,a) - pow(lambda, b)) / (lambda-1); weight = (pow(lambda,Real(-1.0*floor_len)) - pow(lambda, Real(-1.0*x_len))) / (lambda-1); } // std::cout << "floor_len : " << floor_len // << " x_len : " << x_len // << " pow1 : " << pow(lambda,-((Real)floor_len)) // << " pow2 : " << pow(lambda,-(Real)x_len) // << " weight : " << weight << std::endl; return NOERROR; } #endif kernlab/src/ilcpfactory.h0000644000175100001440000000304512774400037015173 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/I_LCPFactory.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 #ifndef ILCPFACTORY_H #define ILCPFACTORY_H #include "datatype.h" #include "errorcode.h" #include "lcp.h" class I_LCPFactory { public: /// Constructor I_LCPFactory(){} /// Destructor virtual ~I_LCPFactory(){} /// Methods virtual ErrorCode ComputeLCP(const SYMBOL *text, const UInt32 &length, const UInt32 *sa, LCP& lcp) = 0; }; #endif kernlab/src/msufsort.cpp0000644000175100001440000002410012774400037015064 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the MSufSort suffix sorting algorithm (Version 2.2). * * The Initial Developer of the Original Code is * Michael A. Maniscalco * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Michael A. Maniscalco * * ***** END LICENSE BLOCK ***** */ #include "msufsort.h" #include #include #include #include //============================================================================= // MSufSort. 
//============================================================================= SYMBOL_TYPE MSufSort::m_reverseAltSortOrder[256]; // chteo: Changed the member initialisation order to get rid of compilation warning [181006] // MSufSort::MSufSort():m_ISA(0), m_chainHeadStack(8192, 0x20000, true), m_suffixesSortedByInduction(120000, 1000000, true), // m_chainMatchLengthStack(8192, 0x10000, true), m_chainCountStack(8192, 0x10000, true) MSufSort::MSufSort():m_chainMatchLengthStack(8192, 0x10000, true), m_chainCountStack(8192, 0x10000, true), m_chainHeadStack(8192, 0x20000, true), m_ISA(0), m_suffixesSortedByInduction(120000, 1000000, true) { // constructor. unsigned char array[10] = {'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'}; int n = 0; for (; n < 10; n++) { m_forwardAltSortOrder[array[n]] = n; m_reverseAltSortOrder[n] = array[n]; } for (int i = 0; i < 256; i++) { bool unresolved = true; for (int j = 0; j < 10; j++) if (array[j] == i) unresolved = false; if (unresolved) { m_forwardAltSortOrder[i] = n; m_reverseAltSortOrder[n++] = i; } } } MSufSort::~MSufSort() { // destructor. // delete the inverse suffix array if allocated. if (m_ISA) delete [] m_ISA; m_ISA = 0; } void MSufSort::ReverseAltSortOrder(SYMBOL_TYPE * data, unsigned int nBytes) { #ifndef SORT_16_BIT_SYMBOLS for (unsigned int i = 0; i < nBytes; i++) data[i] = m_reverseAltSortOrder[data[i]]; #endif } unsigned int MSufSort::GetElapsedSortTime() { return m_sortTime; } unsigned int MSufSort::GetMemoryUsage() { /* unsigned int ret = 5 * m_sourceLength; ret += (m_chainStack.m_stackSize * 4); ret += (m_suffixesSortedByInduction.m_stackSize * 8); ret += sizeof(*this); */ return 0; } unsigned int MSufSort::Sort(SYMBOL_TYPE * source, unsigned int sourceLength) { ///tch: //printf("\nIn MSufSort::Sort()\n"); // set the member variables to the source string and its length. 
m_source = source; m_sourceLength = sourceLength; m_sourceLengthMinusOne = sourceLength - 1; Initialize(); unsigned int start = clock(); InitialSort(); while (m_chainHeadStack.Count()) ProcessNextChain(); while (m_currentSuffixChainId <= 0xffff) ProcessSuffixesSortedByEnhancedInduction(m_currentSuffixChainId++); unsigned int finish = clock(); m_sortTime = finish - start; ///tch: //printf("\nFinished MSufSort::Sort()\nPress any key to continue...\n"); //printf("%s\n",m_source); //system("pause"); //getchar(); // printf(" %c", 13); return ISA(0); } void MSufSort::Initialize() { // Initializes this object just before sorting begins. if (m_ISA) delete [] m_ISA; m_ISA = new unsigned int[m_sourceLength + 1]; memset(m_ISA, 0, sizeof(unsigned int) * (m_sourceLength + 1)); m_nextSortedSuffixValue = 0; m_numSortedSuffixes = 0; m_suffixMatchLength = 0; m_currentSuffixChainId = 0; m_tandemRepeatDepth = 0; m_firstSortedTandemRepeat = END_OF_CHAIN; m_hasTandemRepeatSortedByInduction = false; m_hasEvenLengthTandemRepeats = false; m_firstUnsortedTandemRepeat = END_OF_CHAIN; for (unsigned int i = 0; i < 0x10000; i++) m_startOfSuffixChain[i] = m_endOfSuffixChain[i] = m_firstSuffixByEnhancedInductionSort[i] = END_OF_CHAIN; for (unsigned int i = 0; i < 0x10000; i++) m_firstSortedPosition[i] = 0; m_numNewChains = 0; #ifdef SHOW_PROGRESS m_progressUpdateIncrement = (unsigned int)(m_sourceLength / 100); m_nextProgressUpdate = 1; #endif } void MSufSort::InitialSort() { // This is the first sorting pass which makes the initial suffix // chains from the given source string. Pushes these chains onto // the stack for further sorting. 
#ifndef SORT_16_BIT_SYMBOLS #ifdef USE_ALT_SORT_ORDER for (unsigned int suffixIndex = 0; suffixIndex < m_sourceLength; suffixIndex++) m_source[suffixIndex] = m_forwardAltSortOrder[m_source[suffixIndex]]; #endif #endif #ifdef USE_ENHANCED_INDUCTION_SORTING m_ISA[m_sourceLength - 1] = m_ISA[m_sourceLength - 2] = SORTED_BY_ENHANCED_INDUCTION; m_firstSortedPosition[Value16(m_sourceLength - 1)]++; m_firstSortedPosition[Value16(m_sourceLength - 2)]++; for (int suffixIndex = m_sourceLength - 3; suffixIndex >= 0; suffixIndex--) { unsigned short symbol = Value16(suffixIndex); m_firstSortedPosition[symbol]++; #ifdef SORT_16_BIT_SYMBOLS unsigned short valA = ENDIAN_SWAP_16(m_source[suffixIndex]); unsigned short valB = ENDIAN_SWAP_16(m_source[suffixIndex + 1]); if ((suffixIndex == m_sourceLengthMinusOne) || (valA > valB)) m_ISA[suffixIndex] = SORTED_BY_ENHANCED_INDUCTION; else AddToSuffixChain(suffixIndex, symbol); #else bool useEIS = false; if ((m_source[suffixIndex] > m_source[suffixIndex + 1]) || ((m_source[suffixIndex] < m_source[suffixIndex + 1]) && (m_source[suffixIndex] > m_source[suffixIndex + 2]))) useEIS = true; if (!useEIS) { if (m_endOfSuffixChain[symbol] == END_OF_CHAIN) { m_endOfSuffixChain[symbol] = m_startOfSuffixChain[symbol] = suffixIndex; m_newChainIds[m_numNewChains++] = ENDIAN_SWAP_16(symbol); } else { m_ISA[suffixIndex] = m_startOfSuffixChain[symbol]; m_startOfSuffixChain[symbol] = suffixIndex; } } else m_ISA[suffixIndex] = SORTED_BY_ENHANCED_INDUCTION; #endif } #else for (unsigned int suffixIndex = 0; suffixIndex < m_sourceLength; suffixIndex++) { unsigned short symbol = Value16(suffixIndex); AddToSuffixChain(suffixIndex, symbol); } #endif #ifdef USE_ENHANCED_INDUCTION_SORTING unsigned int n = 1; for (unsigned int i = 0; i < 0x10000; i++) { unsigned short p = ENDIAN_SWAP_16(i); unsigned int temp = m_firstSortedPosition[p]; if (temp) { m_firstSortedPosition[p] = n; n += temp; } } #endif MarkSuffixAsSorted(m_sourceLength, m_nextSortedSuffixValue); 
PushNewChainsOntoStack(true); } void MSufSort::ResolveTandemRepeatsNotSortedWithInduction() { unsigned int tandemRepeatLength = m_suffixMatchLength - 1; unsigned int startOfFinalList = END_OF_CHAIN; while (m_firstSortedTandemRepeat != END_OF_CHAIN) { unsigned int stopLoopAtIndex = startOfFinalList; m_ISA[m_lastSortedTandemRepeat] = startOfFinalList; startOfFinalList = m_firstSortedTandemRepeat; unsigned int suffixIndex = m_firstSortedTandemRepeat; m_firstSortedTandemRepeat = END_OF_CHAIN; while (suffixIndex != stopLoopAtIndex) { if ((suffixIndex >= tandemRepeatLength) && (m_ISA[suffixIndex - tandemRepeatLength] == suffixIndex)) { if (m_firstSortedTandemRepeat == END_OF_CHAIN) m_firstSortedTandemRepeat = m_lastSortedTandemRepeat = (suffixIndex - tandemRepeatLength); else m_lastSortedTandemRepeat = (m_ISA[m_lastSortedTandemRepeat] = (suffixIndex - tandemRepeatLength)); } suffixIndex = m_ISA[suffixIndex]; } } m_tandemRepeatDepth--; if (!m_tandemRepeatDepth) { while (startOfFinalList != END_OF_CHAIN) { unsigned int next = m_ISA[startOfFinalList]; MarkSuffixAsSorted(startOfFinalList, m_nextSortedSuffixValue); startOfFinalList = next; } } else { m_firstSortedTandemRepeat = startOfFinalList; } } unsigned int MSufSort::ISA(unsigned int index) { return (m_ISA[index] & 0x3fffffff); } int MSufSort::CompareStrings(SYMBOL_TYPE * stringA, SYMBOL_TYPE * stringB, int len) { #ifdef SORT_16_BIT_SYMBOLS while (len) { unsigned short valA = ENDIAN_SWAP_16(stringA[0]); unsigned short valB = ENDIAN_SWAP_16(stringB[0]); if (valA > valB) return 1; if (valA < valB) return -1; stringA++; stringB++; len--; } #else while (len) { if (stringA[0] > stringB[0]) return 1; if (stringA[0] < stringB[0]) return -1; stringA++; stringB++; len--; } #endif return 0; } bool MSufSort::VerifySort() { //printf("\n\nVerifying sort\n\n"); bool error = false; int progressMax = m_sourceLength; int progressValue = 0; int progressUpdateStep = progressMax / 100; int nextProgressUpdate = 1; unsigned int * suffixArray 
= new unsigned int[m_sourceLength]; for (unsigned int i = 0; ((!error) && (i < m_sourceLength)); i++) { if (!(m_ISA[i] & 0x80000000)) error = true; unsigned int n = (m_ISA[i] & 0x3fffffff) - 1; suffixArray[n] = i; } // all ok so far. // now compare the suffixes in lexicographically sorted order to confirm the sort was good. for (unsigned int suffixIndex = 0; ((!error) && (suffixIndex < (m_sourceLength - 1))); suffixIndex++) { if (++progressValue == nextProgressUpdate) { nextProgressUpdate += progressUpdateStep; //printf("Verify sort: %.2f%% complete%c", ((double)progressValue / progressMax) * 100, 13); } SYMBOL_TYPE * ptrA = &m_source[suffixArray[suffixIndex]]; SYMBOL_TYPE * ptrB = &m_source[suffixArray[suffixIndex + 1]]; int maxLen = (ptrA < ptrB) ? m_sourceLength - (ptrB - m_source) : m_sourceLength - (ptrA - m_source); int c = CompareStrings(ptrA, ptrB, maxLen); if (c > 0) error = true; else if ((c == 0) && (ptrB > ptrA)) error = true; } //printf(" %c", 13); delete [] suffixArray; return !error; } kernlab/src/cweight.h0000644000175100001440000000322312774400037014304 0ustar hornikusers/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 2.0 * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Suffix Array based String Kernel. * * The Initial Developer of the Original Code is * Statistical Machine Learning Program (SML), National ICT Australia (NICTA). * Portions created by the Initial Developer are Copyright (C) 2006 * the Initial Developer. All Rights Reserved. 
* * Contributor(s): * * Choon Hui Teo * S V N Vishwanathan * * ***** END LICENSE BLOCK ***** */ // File : sask/Code/ConstantWeight.h // // Authors : Choon Hui Teo (ChoonHui.Teo@rsise.anu.edu.au) // S V N Vishwanathan (SVN.Vishwanathan@nicta.com.au) // // Created : 09 Feb 2006 // // Updated : 24 Apr 2006 // 12 Jul 2006 // 12 Oct 2006 #ifndef CWEIGHT_H #define CWEIGHT_H #include "datatype.h" #include "errorcode.h" #include "iweightfactory.h" #include //' Constant weight class class ConstantWeight : public I_WeightFactory { public: /// Constructor ConstantWeight(){} /// Destructor virtual ~ConstantWeight(){} /// Compute weight ErrorCode ComputeWeight(const UInt32 &floor_len, const UInt32 &x_len, Real &weight); }; #endif kernlab/NAMESPACE0000644000175100001440000000527512547246130013142 0ustar hornikusersuseDynLib("kernlab") import("methods") importFrom("stats", "coef", "delete.response", "fitted", "kmeans", "median", "model.extract", "model.matrix", "na.action", "na.omit", "predict", "quantile", "rnorm", "runif", "sd", "terms", "var") importFrom("graphics", "axis", "filled.contour", "plot", "points", "title") importFrom("grDevices", "hcl") export( ## kernel functions "rbfdot", "laplacedot", "besseldot", "polydot", "tanhdot", "vanilladot", "anovadot", "splinedot", "stringdot", "kernelMatrix", "kernelMult", "kernelPol", "kernelFast", "as.kernelMatrix", ## High level functions "kmmd", "kpca", "kcca", "kha", "specc", "kkmeans", "ksvm", "rvm", "gausspr", "ranking", "csi", "lssvm", "kqr", ## Utility functions "ipop", "inchol", "couple", "sigest", ## Accessor functions ## VM "type", "prior", "alpha", "alphaindex", "kernelf", "kpar", "param", "scaling", "xmatrix", "ymatrix", "lev", "kcall", "error", "cross", "SVindex", "nSV", "RVindex", "prob.model", "b", "obj", ## kpca "rotated", "eig", "pcv", ## ipop "primal", "dual", "how", ## kcca "kcor", "xcoef", "ycoef", ## "xvar", ## "yvar", ## specc "size", "centers", "withinss", ## rvm "mlike", "nvar", ## ranking "convergence", 
"edgegraph", ## onlearn "onlearn", "inlearn", "buffer", "rho", ## kfa "kfa", ## inc.chol "pivots", "diagresidues", "maxresiduals", ## csi "R", "Q", "truegain", "predgain", ## kmmd "H0", "AsympH0", "Radbound", "Asymbound", "mmdstats" ) exportMethods("coef", "fitted", "plot", "predict", "show") exportClasses("ksvm", "kmmd", "rvm", "ipop", "gausspr", "lssvm", "kpca", "kha", "kcca", "kernel", "rbfkernel", "laplacekernel", "besselkernel", "tanhkernel", "polykernel","fourierkernel", "vanillakernel", "anovakernel", "splinekernel", "stringkernel", "specc", "ranking", "inchol", "onlearn", "kfa", "csi","kqr", "kernelMatrix","kfunction") kernlab/data/0000755000175100001440000000000012560371302012616 5ustar hornikuserskernlab/data/spirals.rda0000644000175100001440000001115412774400041014765 0ustar hornikusers]xy4n%dl!)eR[JQI%)PJ E$Dٮ``2 Z~?}9yyϹ}\ CQQ>>>>%Yp\"-[ĥ>^g=| uMU7*7t[?]Cǔ*5fBeЙ!HAJvWU%/AߒƫqUhO$[*t_t&R>G°TI t%M%od~zozǀIph.7[}y}  ~6Β1"iπcۛ97XD3mrP7r0mYuy#6s/RzE,D8s!%BPuN8䜝y\ۂ ,! aʇwb>ieg !g6Alw[Hv~ͭ^1&0seX rLYb曺TZo<>Ň6Op;bW=yAM"%[X#Zxy*V;]Ʈ }F/9' sm#nEcMb%XZ4g}L_"1!cgdoGN2>M7s[k;XUjE)%͡.zxU.h7_Dx _rXv .P=7uzaBet>r.Ҿ~)<?ERqt" 16⠥:#1ܒe.5 )@ TZA!Meq~05^o ~S?7yH|q,ݜĜi/np$Kp*cóúq|T_~ z_B.޷_Ƞ +m"ٙq@'|Fzl E48=fvM8{>cfh!c$:дLUƲW;GGaִ޺w:Hwr< P"V,QG攟\mIM9'{Z!U[![&}ib䁭ڋT|?_qf9??Qq|EE y0RnAqEH+i1j%!;ʁil=u Kެ(uƮ d뿅lMpT΁oK#uڅrS1'+ ֏_ mz^nsHFvpe֙垏Cڭ^c 5d.1c%>.4#XT ?|{OdP?oٻ"Ur3X[}?RztG}ivLh/ƫ{įӞw ʹoZ/\cuoIvnc#HU VB9TpєDkj3rHfN N{E7*)H0=\Abhw]@1 sW_EDG`ԒYo`I@;@0M{R9z6pNI_+lNJY-{df)~H1M/*2iN`/4"&啀p\߅OA;A*}Ţ'DaקYq9LfJוu.5A™X2Έ8cFy"W}{8Tu< ejlM0]a3# =0h%]'Hs.w嶓%\ H,1/|FuJ-*RsGV,6"4l=*ȫ?N򣖰9O׶(G4%!lx!д;O`SpϤ'}֝GnƑ7ƪ,EH8)[ñ@00l(<<Ⴭ..s+S:P* ve̫m4u!lIkzcΏC7"IŸ&$>j,}o҉)8b#GV zRPmPuIVi}[YcQ!7x3Ɖ:'4t$f{rS9SyvroZI#ʪ^+vk.8'eQ$U7[pZq*).:S Kނ;q[a@PI9S35V3mCvl\/I6l%c||;Nk4 H2*!{U}4],*r7c_LV.3;9DxOߴeAC5$x0#5)4yF9mtHPq`abf TLp@|RVU -SwOpxDet1hҟ5v{dN'U[y*NUPUй"z>؟I9d~_芣σwf25á(&o)1'YhLĊIH[Q =g/@򙞰9P+(>bu.2G༨3;:`D:z*d,eޣbI=)~vEF?HcM5KN+KJAtxyͽJ_e{W.Xiq0sҴİ`x^r1 ?C&_%Mg3rXu(gSJ<2FBשUG6‹)R3); 
WrY|d]+5&,z][t0fSYWMB}K1j0T> .L5MLKlKpQO;PX>& h(]4~ɀ{H]nuo-79W^ eFԺм,2BZShXF>5W?B/(KY3ԗh* wieA~&iȲM8^{S-Z,ON?|qFYGʕXdw$CH*\#R+Rˮ6zPs{l,vwސD_ǹ&Bt-w@6Fj+ThѻucWC@kͽ~L O]évXa1ףb<2\UNcvH>-J,[}lOmHK&ABJn[~!~dԳضlqj3l#F'K㰤Fw`.zŽI58`"]`@mie!أ<:;$ m @_|V'qh(%E8b Yyf.Rh{ڳVbKLdv,>̱gҢʁ&AY=Q~k>U``)ڈue7W MnGo s>{9Jk/"8 *t(+ƶr.g.+y-06WRvDҕ0F_K2{dt IݤI! &nhO. \נ"#CZ YѩžurBN.cEiħ|@{DE*NEg9c-J]F[r0zlTz)z=FYXVvkBareX[0}ci#(\Ra&L6[$өlVٗ˒c8vDz#ND?F+%fgຼԾg NQJֻSȓS:' .^[40YW?<$n#&%-fEyhN2JTAs&`̫ `VިkO^}xί)Wnᡑji| nb8sՀEE”Ry30]t]>~glO80IQ#eooٽmeTQ&QXW@x'DnY02%Y:?M "FX>#+,`xx[kjDPgҫ:€o6)jsz(|{j-#{dLHA`1ӂӔ ]ܡ++?-ou_է#kernlab/data/reuters.rda0000644000175100001440000003773012774400040015010 0ustar hornikusers}ݎYrhf%Yi0;8A P]'bf9MR잙]AUu*2kE|;`_XO ;/^zY^n֭U9M,IަˬIµnK'*\ZӃGF=JeV?[~vUiҴzf,:+wmf_v46t(Q#|0yzfy4-yj|IP{]FJ~V4y|lIIhB,)/Ûs2.b&ѣt=mDCE6L]fyNzڭѩ>/oYf˞Ve[W%8+M-]2vpn ANv_:Y,KW"LVY}Z+7JˊlvNv|:.Ma>MeU,6$}//:>Z-6#eqyv"{|n /(Oٴ¹:Yc9>)%*ϺvDNe(*/;Gs[Pt m>si!*']JJ]tuV]U?e4Y88$o銵|~<ԲЏNU#Ьn\!כ{k];UP•xlzM\!%6[+|jRSB=q 7RNlu+B<Qu#Qdd nRˏ6㑼+Y썐Vݒ#j>(SpCUե^e.p~7SC}ܴEf 3q iTMԍb{*͊ 8~#ݸ2¥ˊ [Ħ(N9,[37ͺ b(,Ym./7okҖrtkɷ`D9my[ Q>]fOڭ:HKW_agIsM )LUTR>ꌚWm$9*"\{l&"7- Ur#_ODQOu& '*K7T3Gb ?_ɣѭXv4I8dOYfr'v#B+*Rgyi؄&Y`im?|%KA4Nu-F~W-ir^d!mWxwqbl~_\]ld魦&B]pR}0석}Qɏp߹)$6Oo|щVɤ安=[ӍhĮn?/Mv[`,PU E:5nnSvO!Wٓӟ滑RN(lNryuep^RZ{>p{B֞Scj !U ?]>XZ<{p#ﵹ2lQQۑf2âO=F(lҵ`sQ $+F3){ZGϒ[#R3&9U1%ŧeTbzpa8r'|/5VU"<+q nU&\läj""v&Ttf]!DZ"+5m ƿ"U,1V~ z~gpVG.R." 
٘-L^f+ia"88\mұ:ٿ|#xS^SL7yQnlR<w_|Fl{T~;]//ꉃegjndV|PdwunW%̱1mPR$z~c5ҥ>%zۚ{۲wbQl VݽW {bp0>T!T2a3ʫr!,73W[~V*3;~?N^$ WVAA^zݛԑO Ѡ[2H&}{[QNňWBÇv[$FpnTEYWE8g('"wΛ j.gg~Ay`?vpI YRYUmToq6P$"`u_ H"oX[׮]Vj)$b!HJ$יzqˉv j]1w<ك=,gx`S){k"M$o$f2b酪FY|J8hG 4P$b"@@(MNſKFzb03J;hV| A 7hmѼ鬧<Qķ6mU_cwE-Պy&8o-}RG^_fZTF^x'F!iʀd|Cacۑ)y-[xxz9O }hU5xϾtKl`*GKXIș#C>BEBWZFz}μِtos ޵'r¬>"J\|% fʐ Us=frhqJ)7N2}VS7kD4wԗ$>vf̑4WHW.żm0&{G`{|%r7UYźG^{nvɸvMxXٞXvNP,j#m#/E&Uմ VI j#WoRyU+<[eV%3 XA$ßQcwD( - ⿣} ;¦M*>DrAG3B6ĨBq:|&aƄ!A^i|(lU+ȰBw_e;(⋲ycRnx~yu{(P"%Cl{9-ŰH f"-E9 /#Mɇ*H_ȨGU5*",s'ڲĐvЛMVQHJ ,҄{n#rCu DjUQ"vLDܕDAa#J!r⢀nYGڐ1'LNvb^\ GR, &Ϊ%|2l[aKBHK"k7 Mm[l1t608x-(AʘM\ieD]A):4kDI~߻Z~$o5nW D҉gूͺepy-L}=R>X 0hz KXZ N[b7qZ*y23_yg0UC37B@}i5rg \v׿6rqiʆt  Q&kW]}YT,&205ZʢC Zz Ge i}1ɽ0w12v8>wZu!g6km߷Rc)wv=їqĉHO趪ۈ6ӉUG[LSc꣐SdG•B Z8T#+x1("\'S])&Ip;Pd۲;igM9'mr @O l( z[hUv =n-6l86~1N_v~b|BNw&.w7rްVЕRK雇 S+1M-oϑ*!3^y,`GBh Db25J Q5x ?x|$ܑ90"a:յCVQb%b^xm A䕪MLVF .F!|Y8 ;QDl_G~=*7$D?CQ4`5UW,a/T6 _G)$b+=x. _8vB!*'{m13X򦡏=-/W2e!E0~>;~ig,AH[>^Fy/d ~j9!O"hLJO$1>GLJt$Mp^/2m޳:w5UϜƳNCbDh):ҢDv`t*k|CMKy2>.leޣڣmBI>g ]8F_-[#XwQ_͛MCHqeJlk[B!*=/#s(&QȦ<}~;_fqDQAf"뉋1dhn <( pg Ȃ5#\&RAvĐ-|L0>0( &.*ޚd~w{8#-cr;QՅ|qwV"V%y&&-qBb ?uF5Xċء>T^7><;#L<:畵)3wx0z,V ɯ88,yz<:y0m$FGmih8#aӣg0QY=k0csaZ.[xoc9s sQS0W' 8e+*Z?YHv}YW'J7يg'l=-N=Vxt?Ezegm Sϯ]Y:eByI QH^eTzdhk#a~Et% Fɛg00$bA hQ(632*ʛC9V]m ZBoIsZYٙH>pIFTyޱTةA>u)>]üx|#ܤp#〡B0X7(f{i ҋ1tdG-_X5c@ķELRS^U1l\V,y[AF6 >Sf/;^_bL*Tp3?8.}F,R4 q+in'ۖwU^(6CϏFONND[OJ`Ŏ?> y@Mg$?' 
n#Ds;,b_[O6-uN~.,^N1`z#&β;j3҇?^= /oŋJ 窩'&Lވs@mx@̻)uVRxs>E¿xv^袚蝉fMӪP(dJx!{HV} g|:Rڋ XV[7k;x)4EoTGF^&krW>xQx;{ݳ7\n'==æV|x_,T7S?ѲmE\Y-ۜ/Js8x!<:8J5NDu>6CPݙgfbܔO<W`,5q_c/\*C%OlmeENޑ邻D4&&ܼ }x$W$q $}C zPV֡;>fe`\:sGA {oKIRvfAn23TPթ6'sgyV͋)7[; 1Dl1b?fmG6L*W#q!b.g|*nӻ7?AwVVDA(=z/]^Qk(GhtO/_篽Z D@m#^2[#q!Tc __j{54YORF-" íUIW*<ӷ/=Nr4,1ilh_ &1ĕog Wp2'Rb{o2Xٛ1k޻{,(gJ;6('fbE'uې @ߨ,E!2i+qVM1 :]ZW2F5wn~y17X q{@U-YBB;|_˗?_[skyZ}3s>2#Xl;Hes.7a+%`,-6'9@ ?3ϚaW?\x˼t1^AɈxk0o $||֚&mPM^ˋkE xl$}p,Zj7OTMOW,1ڼ+B )H!)ث'F T2#l;'q(1,V@M`BҦB'lloP U${,>J,M Жǜyd] 둴!Ov|1F!]6SfZw ˷|qgNLYXLQ=*qQzV}zAl*527pJ۩s`٪t2k 4ro˔Ϋ[oi3$66gHEԌL0W#V p%&E `RgO߫Rm7sS#L b,'Em uud З"$jw-I!"B^>Lg'42(*&}^U[]NvYQN/]4?lN Ux!9YTDk1Z!Ϥ& oi8+ `wpNCB-YںEoyQCNrC m+N@^hfa0(R@wV'*x3{Rld=K.hU}Xtt[*UԃҦ(!Yu2°DvCĈWk@8kj_טK=8~W[mTbʨ]ݎR/B\f+Xo1J<-6HZz@vch&o-2 ҫqze>J~ ڈeMK%[zj/m9YIZ=vB ŤH4vaFb 7~Jlvl2O:s V#Ȩ(*a [?9kb+΢mY )Y] dQK1JpKle1ox.xN$ÜIyUL.B:'r?8ѡ)ΜܲݜFڌ ֩5~~{v1iƋxHk Ũ`.^̺7z/~5UdQQm{6B`ʫڻW^:OyÂ$G}cAxچhDMz3bZag}o4RVӰim,)Հ7ׂՒ]-b+"Px=Pd| ^ga jBgYKo(iq]MEUOgeV_E-SD6 e]G6I2Hh!1D됮(U3}QCH.sm›2kC܉j.^K[Y^/"ge6ɖ<~Qq,Z>9[h7ws E7gN y%'e(;Iº[F]A-O5ìLį8ðECKt{jͪtj⍷gpi(sgzǠ3[iބx0.3KNE$@fJw !0lQ+Y$}kuhs)%jja.de3]Yi/jʼn~1Jz{#|hu=w#yd{B5<ʏ|bVm~xPPf%da*uf`&ݏKFBʛ ft h秞۴?)uв Pdm߭sw_`Ozyauvym(2a堟n`15j_4֗޴9>wjV덙x/YeLቾKЌ cD`ȨQhLDT6ƐǘAdv _cVqܢg#`L80zp3n0F8{Ӷ3yaj}n00Mg -28X(/t2z=MsjbBR[ӣne~q ON ;[F^]TP~^@}&: >3Y]>F,^:1oDsgSv``0LfഃKNH~CMsռͩ+>@<g[Y[_[Ǽye qKM69fy}#D٤A_](K7ˊUγ񳛕6 Nݤݛt(H چE * MF[2C{qy`0Zf~`iԜ3thaG#[LbT]թE*P%i-NaTDo62 }O7Y#BK7D6yYi삳r_UjOVJd<ɷi]5 *mbn:!gyZdIʰ:=_XRèoFZ$vLJ E֕6F\@d(bMZhYF6pzFx( iu:s~VJ!!61 Nu'8s|֍z j+Zm@Mê0H[3oulj>r.ݺ}Vݝ}kjza~}Ӫ LgtWUPuM|{e5P:(!(䓨\fNtZ5$6詅 Xiၒv4&9?`NJu DH4$u|I )lY0.7VWq1GA䍎ie]޴z@Gz`~–WX7pe!YYjh6DInYWx?& C|qՑ zZ^=.eLיfQV_3I\]B9kZv)ZIގL[ߞ1iION5E m#w׻9Pȥ=ϡ}{XҵEC~,.qbISNTPn5R$fUb&ktSoEzҋ?^09zjcH8* BUNo\< Wσ.F]SLkuMZe$0g4hW{!G:bh }#.|Q+Ylq.0\meː@5iU5LS{FJ}5Bo5P|aS< X>svvvm|7/ba۞C ըT,bqYsl:ۨeRA%ho"5l=5=N}'@Ht٬1σGIr!RkWѓo<~4וV 1>=~G5:Iw&Q{E_St{PޤAW&(k~o @ɨvWk8qí'U,pvݷ6TM0^DŽ&x,qV)Zqae.ɬ+>d 
eUb>qtU`|?Vٸa#ؼ6NIc_q<>S:HrD':D$x䑎NxкL%46 ̳PDȞi[)$6/Ga;yhFY;w˶6ĂΦx{T9EsM)*S[)c@WMJwW&|.jtmD]?fGI^OBod1ޘ6ݲ)pPiÔ7e疌'~d̪)HckV)DM=fΤ77!*,ASFh-kȋm1F`4^M;&63]۵t0/ΟlZ6HLk~eF.ʴвj᮵%kx!zRo(V(dߙ^Vp03},bL?b-Gs0eEC9ػ͖٫x:fjvVO"[ +4bJ8?VܧH( c jk`?[Uެq F3Em S93D](繰yIFj!LҢ[@3Z ۡ*1VQ;x: Qig/sL7)cNe6٭Vk@'\ʁ9GM >G:ۋH7Eބ gA © CCp" "*p=&Vݷ٘p/_Ü%m5ZNҀ}L$p)}C^Mw¤'4m@t~|U[<0B3& .q4YT?]Q1d0*18Ja{F`3ZY%rϙD94_)+yF)*lVaq ߷z )Zc yؓX5[l\`# nH҆xD >/?Qv#'SxB 7;o+ T{CwWmNvǀW ,|]ֵȾ~"|WeW5cmϼJLYPc=@IR $ۦeW >B\"):rb{^4:BΣO#:8׽ <{('ψBLIX8=D<.`<E 87jMrv_]/[q.^)`CϯLv(!|廲_L~q:خ֎r!~4w#jt筇Js퇉~_vw Js݈|fk71&hʍbx8%Cx!7tIjg佒XV/"CC|#,Qz2ի(8DwDN ,N""tB𻵅@eDhhΫ8=|{DXs>; ֊-)oڗGؾc(TP}%t(;BZrtɔj(LSh{2Ov0+ak/y<2!I8lDzԾWK7}) !NeC3NeC-!_Tf0Ul+2̗z#Om$Ql7iw8Q|)lh-`(+-G.*ޛeXZ(E15wKS|_vHsShf( C`*[xlL^.|X1eSz[ܜ҃KPf:sH㽾d`:qœ_J)Ic $Y}."Y&z9[[&Pl{My`@ː{31cfm"N*<, 5YƭGx]gW?gy"arL3^\vy|z Kߗ4wPkup{X1-[/+2Q+zLwG1-a&6dtE "uviccHIawL'7XuB#CJz| "3d0jK< #]i@QѲP@ d$40%ܳض4v (&&Xm6D/ _ $6,=?ޙyw:\m,lՈ5*۔]/*I/\ÜI$z~XVDXp1}y9U­66RU3㢨&3""{AJ}{fW3p(=8[r$ywee-m:; (u2dݨl `|x Ǐ1ַOFoih'*xN F1x朓~<3&hXQ8;jSL{̙@~oä_}ZuM-_|տ~_a߀?rԗӯ/?Pm}돧E/G 7ekernlab/data/income.rda0000644000175100001440000012321012774400040014556 0ustar hornikusers7zXZi"6!X[ L])TW"nRʟy,eYlDcNC[k26iVAM >ҎpVzg]xs̰6 ]:/~y?s-ҥNd1ꌻ;.5cBqHYɴ3-0,w.~6J aOo!6C%+˄#Q?tBJaW{`S%U&B^0aǧ9:'w3OMn Jأ-+)HKO\ Wp߄w8U䳠 Na@߽=ؕY<2 pr-,H $ Q(2f*9[v}॑8ymV4ǃJ;tljj S33v)ΡyۙRHd,JP"?oɳeƓ.VO!-挿B(TRqŧ3(ŅL?,4G6O)>WӔhB*S7#]>V*C)zElQ{߼k8,Jup E<@e.2/$߇EBwS&!dkupXNIiWߘ/gبB&.lqtzwy\cڿgi-$sfx?-J֗ >=?>HR\"7 #So(Mi3oQi l 5/%-8kq' S*ͦS~1qܠ}JVhYeDE$4xSex88x )3uS8ng2 (&ѯ?{E5 +~Hv^```r,vQ{z/S63X$RfYwoyI7م,Pw}mo=~ <iaq =.%)kÏ^̄W@^%ϰw9Zr tVxV Z h -NJvK W5 t2w0=lI1sDg\RVƸP67,۠A|7ܪ|~ \JZB E5E.~d 縳'^&fD;Tdoi%Pb F*lWILZ1ǢՂU FxךE!oޏ{[ l4I*ʚu_/S! 
A˶}mxT'/ @\jt+"٣+N:KhԼQK!dZ"ÛȃQde[ `FH= Ӭ-s@9BjPNG̙f<0hAֻ >񘿆lgwSY5 ^Kl)]׆-J ~pN3#fxB$fV-rrِBE*NO6b6HA>D=[IwGؖ& 挀Vb69+mwԅYL"I,;a-.ܓH()Upk(c_jh,IcpQZ , KUNWG{ YR5 @,b;]x𳰀:{c,\:,J;U* zQ#U0@g_7=2yMEP)!O h2+*(OݲF,e k:HpV Է,к+'7o+H3ȷU{f v ,^^IS ( nlFKN3izeyZe*}GKհ t#乭7ktDZ"2!b‡o 8^I |ty2\/y #V >a!^p%TemV@ٷW5i[<c/%QՌ܋; s,&́P񶜶dE9nR*C-rqԷwLe2C[&8Ҏ Đ6KZD:!߶OzL+'K^@<]JFG{a"SeF-Y%n+w[ȑZjL&{m]H7}Jؕf̈{2 :8OcfKpEl֎j,>Q,YbKv Ttɗln&c"9c+O@*s~j>Ȗ,c0b=.H 6 Wmڹޝȕ#z!q^I%j;~oQjtt W[L8/]h>D54E=#sȚy}tYYeujfWzӌR[)Jqo}["AQ/epgwI0=–ķ5Iκ!ҿUBIQK8i0KԮxVt\A ?Yf+yAȩ(XY}Z}8/)X[vc~S4(' ;U P yN)(3;@tvwd+yv3/}U= \l8cmpm@4 DPZn! ȏ2sqHqX qGQ4tD8:7&z~ƂU) Ѱbd?ܨvj(jgeg'M shٖ;RN䠢 ėwyBaN 'ck ;ZH$rA?9;Q# =\q-6# @G&h3X?K^ pd'l;C/S@YQ9AIɌʐl 3lvHvo9^L񏠟e$d̨Wu=3S=y<\x9mBs]/fW-莯K/Y*!S$CB9 2A[fkr_P!*\H?7k)E>b!Ao+j [_si@E4qK͡n~J\"etIW= `yO=賵D).'q)asi[~0 H0%ܼhɍ,hzJD4RF3$a  /ӪIcJ)}_Fs#\PDIzK!c;YP}@ʼ.m>xߘW!A{<;R!-1Vdqsrکr-Ng2Гw7MԫJP~^٬}(XZZ4c5B6g~12H}b4grD͉ WP5[%кT )2HI?dRyMaal6E}*eC3ǘ=κX=r5rg>T-Ϧ("XxD&9z_ tƎd0--Pi&T!KGI`֢'MW%+S[BWeiy|@{s'zy]0?zFka^(Ȩ#Y:kLNB O!j ar ?Lͬ0z8vi:޿yWFp1(n*]nY^QXj$Ԯa؝oG p&ިn<ݭ7b>UlU_ (D{Λ}~WYkr >D\)U]1'Gܕ S VBjEKMBY+ճaŅ;[]%zkQR7e&w?nmuSs*iB Ռ|t&ﮗF95$m-B#NQ-b(Lǃ+U#74 =T hh|M/yԅ1-Z-5&\B7->;M(ߺL<*;{MO   S3Ƨkc-+ _:<50I .v%gjCT ],mr Ӊ1O'y~Gwo 4}ڶ]Z\J>ޕ`OٍzR, +Y#_7/gdZ>xy fZc01g$ ֚lpd~~!wL_F\7r'8-?Bщ)6 t +4ɜ.VŽNT/l1Yˢ]k6~_ֈuP~_'QsOvH$l܇(#ݜӥx9WA\;Pm99L3[ M7rķ}0$>'W(~}M={ ܵ4N5u% ^KYU"7ɯ"\9 "Z{e|x~V)B\JO@d;罍sdGtctKqk< |[;;ks)(QS.lC>)QacY "[|uE18dizUG%l1Kmpz=ލt h'@)ڛ7[ O5$*Kx5u*ӟL~S?Ιh>y|M;e^u28uÜF4\$Ak|VU'9Ou<::"t_ۖTB[9J[ f|#}2(kdd'WyOB֍!,)p-}? | yꪟOLz^kע.FLZ_(xZ\qs 1~$f-.C|AgN$t^ʤDic-^Hg{uW-+F}j:IMU[ԝ9dU/)FڬO`0rK(԰ѻDus-":J aq^EH 1gD"d!(,BvK.;:  3Rd6[̤+q. 
To'-CvbcXy'C`(/ܘ3 ]MUP[DZf Y r[_qH] ^̖Ga;]]J0B^zQ= $;NCLJTJV '4IEݩ dP%9Rw󳾭ޙ-qQ g񸨹Qk mIZ_cČMvwo:btSkl #- vŤ| fR%i,PBD7_oT ozS"M, e@v&t}t!xr1H'?E?^BӬC.5߸x7츢/]4{* Z fWSϊ6^2Qh+t([ Joc j*XW-Nͤ\ƏBamTXfcN m g],=}@i9S\1s_U%DɠUvnY}SV]"fpM9K2USR'b^v0h< Nmp6Ґ.Ub;bP!ZhLg寢"U  HU;Mtt1~eFStL@޹] g&pРתӳ j2Ӝ^a[𵇟@7Knaj0p+i}fJy4Nʹ}qd98GhݝN2#B9 }Fu۪.^Ah|_#ĨOZ,]I)2UJ4gAY 4ʣo@!'pLgM.(j7-ͼb(PRVM vM~H{)UWF)<{Oej$;c?bb|Σ`-̶~[S`z,9|fS ADMeY[^ApYKl2lZ`#<~lX=n'<FW}e[l,e Fڪ)}-X)\ѲM¾(^]f҇\3 c,+cZ[=yP#"RwSmnTy/oLu:qR?Zl0D@HaIPTxAEC϶R2ʗ`hGfHt@\-a>%~Q-')HV*C5`]fZtc:l!zHK5_Fg<G2M1567&ʨ 9~A}趗5 "7 =7=76h~Irw(Zw;{O?Ed?>TpDpI|paN9db1DEyS%M1Z+:uUYzY/L}R؅ԓUsTc_#d᫙ˎk= Ja˅Lp\$DMpdBNC5a˸%X"QYMT:dq:ŖardnKށg%7 F(n x,uIzk1@c֑& T;n1U .)ֶjсr Y|P>残N.il~68_9 N1LxyZ=楫:t]:גA(KkoA%U H+Ic3{Yٔ\(j2gyp$P0ߢkHz`6i))ȼ܋s z<, sB'0RGzl <~?3ڼ?8f+L6bhţ<j6 |Syۑ=!mN&%Bq5FstߣMy` O]u'-" ɮS!N vsᓕ~n Pt)1#8߽1XbB*4nw7* 4:)^Y5UuO1FB.A5y U+wRNwƉ`D{$?")}OØ"$cp`{ݏ#>0+BOGuex@㘍蔝VU0둸co@Nu%2OߝV}8GÁ=g6 H -QDI9#),i! OlN)X!|fbm|hEh`+ Q1Ku4F/5NDsϡXCaZp!~]$[>{g%"gBv i݉`[[WBZPHW>uv(E<Jz^n=V$jo>Q\ŐavQo/OtC%B?~PAco!۞ԶHb6Ձʷ6dzkU8\{ ܿ~&UgD]nn~TQWӠdĎ=6M&%m&_ok2\hܕ"X$%ZIq焟:oˎ$ Wȹ708w})-n~FiNϭnăzh$FD]gP͞Ҷ&/. 6DtX)x 1+Y D »%ضOyWկQE[ 8w_YdJ .\@!b8)hNq{h^ ,q8/¿$z Xpx[RSiHx\:&Z!%R~HL;֑MT.n:oqd=vEŋ VBGbY{mdO̬)+kK]ȶc6MA^n|0pDwI0lpuEsEVƆ%d^_+ `b컙" a3 m\$V %+EnTt1Iӏ0)|bbMu걷~I||J*U}˲|Õ [HMq“ ӖdB(d7d D]߼G͂;aW/ 9`Y& sf/$^W:^hPF(\#}QZ S~ٓ:0= LgGj\?_ P|W<XJL[KU%:Fu6l8 }\Y@%2vr7~ $cwqcIMuK 4ET5ݨ~\Aп6Û C b[ @;Y4A4XGhɭP&Q L)Hյ{U >"|iFZܮ* 8b`MXJQ-u6NxzѻlĞD\~0:E3?, TRPkHo-8%zuin84}U'SݮN8D aZ1gS  .짲 X;QC5*rtP?߫6HG7jϦ2E_}ņuR Wqy6o GEX蜻 R*psruc4Qw%(AU^4/>Ԏ&d܊/.#33;[մ<@ۑz9 V,ė<AǧڵB ^ lKϊ"& YY䊰y@e'Xy6^߭8\ \.{PGIЩXђl| Bw@DA ku&5R˘JEL9m&,8n6mIa0F" xtrWf+mgQɡ,0Fu-\.90Yy]af%uzUͰ\7(87e: ;I{MFkeaSOÖLWj; C xX4n x "ʾa?*u܊uvAeh! V2"A0hZhtyZB( {VFP^8@c)-~@. 
OO5WDyLgw,DX2VɂnM":^W5I|^Pkl>"jDiH>| ֪™e# @eRP2 ]Uy5aEqϰ),<Ed$sw_AP`C'IuJu*ZХ4=f(~rMzBt4Y|$8Ui#F3ޫ4ՠl ki 4-k,cW&s0@!xf䧰`PϏڥt*OUK:m8ukQ8h();Ct56M<#-.)*D8&dX64` Aztx&B7Ľb V9j6I,D۹#׋!$Y~ENFxd4Mj.Kc]T\g $uwpj>I?4{}>+ޔ^ j Lx/1Fղ#:,]=\N7𒴨+%-w) H\ʀ_!U77NԨYM`I:p}ȪA{A$#i•~$4Jq-SuU6 Xȯ)Ԯ>5& 1v*cm_GzeVxISI'$WCh`V,l eʙ=:яƬϫb}|n"mqxJ\-S*Iک9\`2?DwU;3RxEKz('` JzY[>s7<+#dT6Xw͆޽vd cܣ H>h i$7u~_⭪<`$M>6y3շVѩ ,Q1e`EY҉yHݶG3旐&Zs-{$a6E`M#xZkcplh'p.8)ۏUF^΍O" sv{qaR\mH׽6Jh5홅NJL 6ʶBPUFY:|ĮR'dK#P^qCY~ROSrV'W_6ݚzdqՑeǻ47 -He?+#&ZE}~o!zs#a}{il{i?T{._3-#}2©lnVk$G;"q"Cã2 bbC=z$ޫ;{|EQVKtF-ڍsGü=xXY?s|sVxAxsqH"5 Fs d!hV0L&Rb[| lZ,` bRcڿ%(r9ժFy*ڷ? AIC 1*@B8Bp/2z&tAPtxbW,[EbU%{k#8p!/˒6$IF7!S@M^& PϳIŽchP1#=:ΊH7rΒE+9X< ıjn2TyF<f+7((Bg6vn[Xs%zk5Eߣ/K]AmiG&yO͡ݐ4 #>٣q>b=0L*y*Zh\8.`*G:{yFfN:]Z""972$@6dMI+yd?ȸzAlU%H!ɤX,QG [-4u]"2*8* Z_M`TG|$/:'kn?7N*hy,\9Jp;OH),`^wK?Ɠ%SޕVk}_K '*h(̎Q&z3c[ϷOgk!P#8ȂCԱvk?st) @?]d4%+ZU@4atEews C JD`'i}F?slbrhV4̠Bؓd^4iE-!f֓⁽[y [BŔ |;?Rw e" lةjery 鿴Jn'T4&v!8Y jn V58rןrm Eq8%Z@Ŧ2OPVDHh^3>QIy蕚:!G-"=eլ4dυ"839 K" ̟jڿSAaS yf(r0(Vd8| pX{4n S BKBzR$O=]|]˱amuvxfy_}]B|;M5]9`Q~F(梔c (v=iK6 GDNSӯ;aaV^mҜ.mp*!Y/Qg~(^xJ)zwr$_8?]>;*ZQoa2WwkNRh:X-U۫ E*c~N=M+zGٷ;3q$@;sס'ɞ.{m[' VZc(OfaJUe%-@ABX H5Ql\Sv#{ME`L&Xz@yj錪&kwHst1Q<#jr>XySieA,X'6h0nKG6uV%acL_"toK'KP+Oti}x6M.uEJغkAj)@ϲQXL?L̏Lh )b> "b' GMݧŎf/O-mM 0{lh'7 ĕo٠g*V"J#w \gQ4˧7!IQk[ s{Z#Ohg8\H8dQ F :oP` kR@d4i쳖kVl ~RD{={CCa՗4(HxZ}\B;lY^ZJfjDU-}Z6'aPMʂ1]P€Ι69&Zw4r4/=+)EQ&e/P'P=:?ͳq4u|U۷JolFM#V.k-W?bӎhD\z6nv׻3Gۇrm7Xc\Wghn•u!kh'~u }6X\bue8[qԲu:_ÎKVT@\-RǯZɃԕ8D"LQ pydX/p̕zz ;pLSpBq.Xcǭ n^0.u-?hdpbm* LM[,aKXF?jqr> -q;t&Ϯru#"Q6/ Jg{u`UQԵj*?taLCކ4b(l("2}Mϒц,AOYX7Y$Gw~8Z3H}[&+mAFݠImno TiwU٢o.K>TP7gsamƒ<=JR$-q큢U|/&Hr]#^zJ 8X$% V e.dp jQs8= ,YWJ1~E ڿnIoPfP0"m{->E8N&fJj=ID²Zz^zA(vjqg Wsf"!BA z}PŢlFTe@1\|`aPsdN.9̵RyXT{Ʌ5'*WM{?,m_T]<:Hl9O`{آ~(t$JE{kdGFt޾T<_]Ed8U+H|w̰޲#sPJ1#5+>"R..֋g2QBSKM"SJR5QtS 1յ9|rX&.s1g^(80Yѩ'-O "9@7ZD̍'Y~N5|.zw-iXoϊV;/j'-=nCt mN7:ځffrU<:="PQdJSs;DN5S-Վm=D "=3gSM€+"/OڗR.)e tw%N̞؛!VX>9t,?*GPikɁV ֜`oTxÌOK-5:V{+U.:[!Dk3(V(QB&[}1Ek 
uiRyGLU<6!VuQ?Ph=ῳfлƦpLPCTCT 'YO񥃅gL!!Nt}/E8seAT Kݢe2P5ʻ;xBL S@F5RGB:mz w{sJϺ,7UgY~I,?ptpٲj (~Aw[BYe NWu5ҿ0a%gy ʽf(v&҆5aX7>sw7Y3I}2|;32.i~tJ %;XlaWTHvƩk܊1*oGVkA/6]Vs5- O1`3g\iBd$_\1B[{]SM E\L .[qG7՛_T9 D ָ춃'=uU V6q>*,6`0Ʒ1T$Dۧ:^pF=W0?R{'388+HL1ߍ%hxD~kYa1+m#Xw>-);\T^p$B[OB6K_/U@˂MPcEFqP.Q!Nm.TߧaLf-3-~'x$"Vka*MYfNP|E\Gد&t)t}c|ׯalb3,*_ N\pE}]N!]n'st;`4-R͓ku#S _yA6&,`tŞ7znK&?z!ZLZX',؍Wrrt_yTJ=ZTDMʷ Vɭk`Y#u_ׄ$o5٘; fPp8Xe ;7RtYNE2>9Z0>EWҋ>xZ5UbwW{ g䵂%4PDhGΏ:ݲiN2w8vIl1;Ѿ"jPHn=E(ő~X|e?GJy'l֧[l?zߵfM/FEܭmѲ e]dVUxlNܜ`=549Աv _ATt(*>Ȕxa<4b4[zx{1Oӆ90FN*!~$YAzWhJ"v(۾hgXXa.Z NP ~#ή+^7|kz|H)@}2́o_J.ܙK9(;4{^-J,=K%򾰘ݧ!mrhY(-q_t^8UF;~*6N<c?<|7֒ҰY%(ԴHYYk #:ًkԹQ;iab)dt<8N#C-{wec!$N-s/J"d%|V2[HZSÇ$G&B h?(@4L0M\0yn2{#x,1% sS3z :&O)y eGv^zJq dqeM <&`Owz=:ņ=:~<2[ he . P: 9&Y1A[0^("zĊZNLC /$QU Pg7h4I}b-rq 뒕Tf1p>IgBK:3@J]E\O=X>c<#zb{τ8u8jْuψ{dRf$++vH-{KelEKnG+)花=Ɋ3QKxfmќFf+@j x;jϬ> y:Y0X& $z[z<KW ㎾z\H5 ~y0LAFg~f-f&MyB!Iĭc>~ 5୙D_0LUYD.> !e#dG9] 9%HL/6aь6Tyb@ØG;5+%bv`O0ߚk>o'Bq*;Ϲm~@ D{εw P$Gz)ٌ=/*(j>͇f?Mxӕ!ceBI?4)g# mţހXL Ѿ Z!Fx#>&_YG$$چfq< ˌp(ujRifYˆAUL@+矃ݾ1d Hx1 Ԉ^5+)J'?TKŷ !W}ul&RG+Ra{>E`afv:4!(}P̡_|dҵOI1ͤk |D q[(GA&.݇WA%䗝7 SmTpu2RƯ&sJ7ή~^> c:SU 3DVä~M@4.5J۰Q#IZNK*((g _S:gNx4 ]Sh6),yUY_J{F<%]VՠvqXge33D]y-}qD^sG!`+`2,IZc)8 KsfWVibow9`Pt-^YAiB` 8*[ u?D˂YѥB=q/'^/O!y{r3VپRkVM; Jw#UƵP Weſf72DG FK-B~RٺS>JkzC[, T@(tyX8Q3Ab,Oqp7λ{|pN<8%{})}#_)7WTRfG?JV-8a64LidkDԹB<2-o@vvvt}YR#Ӻoؾ"JH- v&pceo"DH>Z\v6eVdJ0L;$?QG-^s eEJ[]A腿Nơ'͂Aݸy0>4ao^׾oj,@7VfVHoRxDʩ%VF,?…Qb_Z`do/$X.hݓHڥȸ8= gO [:B$LZwadgemm:4;ti!5pX9'qEmߊE c{sOq8f>C>J|,d&A؇YIV"OʁEA3{6*'P'ǹ \ʾX=iR]9*O=k7V:_κ;vfln>ݓ:ā/:iԅ6S0lCi:O˲J@v*%Fy_\\7 U=EBn]:e$^Bdoe, ѻw*ޔ=łeEs^5ǛRf3,-6~n#}<YI=nj !Yu56GƮe^ZP7E%S58̷c Bx<Ԟ72YBlQBsgfj{ L^,#ʚǠH=2m8y*儷sZ2IF z\sO2 F^y]|*#uS]w~4TqX騐0CIRM56[}n; tfOd:-R0CGjfD SpJ紿iS =-iGJ,'!w&lUR3L2eA[T<ЅIhxa}A+7=e4 %D9r]~vrp'À]zӶ{_uYqvTNnϑv/=0þZ#U У1R:/]Wi>i)2o-f#[)%W6gRFП"V$NѯȄp!%j{J~9p3d؊{/w4,TI:!HNdJ}3ETsvQ>A5n:bя?}g=q|(*V5ipu C3wۥ))׌q„ r:#s/NDZzBRht~d=ZO$趟˿( P)*i >5w[1 ^oC$EY Qjd&e/U傷IsgՐH?*w%)dn뜳vY# 4OW*WN our/*;47hD>2 
eoOh>/{n=/'.cqQQ&*zX $={/M!2/mIEp߉QVؿr=[)& }E6}V{qQEx"5sf,iA=>iX/af伛ȣ^9LKj4kM{m'Jm <3^y0Oz0+37F 89V41S[N"=$&]IߵS:j-x}R(tu7pc2Q+!ow} _լIC%I QM"t"dZb-]=_^_kjyӷU_SBj\p2!TǐWc>fMO?j!S{Bnfy:蓉J%wnSsA7cb'/Ne3mV:iҀ #?#v`? P{L&.$EsHBelk3 #q+N/E kL{B`FY~0 /Q}<XDKqO'ng)QZO"&6Z#SW KyLng)9v>0Qop:R{OvE mf3[ 2QzڈٻAk4ٓZN"K.BHVBX82JB34q5UC,frecnege!/(y[ؾ{#$ϧ![<]) @671~e>9 H8晆DHhh^jvIOdJ؆-_T8s*x쉃_a X[H]f>W턼gk{l>dpQAqu(g|w^Ź8]3 ZEmV1#t# #/eF0UHүI*eU-7Ϸ$#Ck ABN4 ૖rdzIH+j _}w%ilf=uQ";kצYp>K9|)k(OJ*RV>n)c%G]wᐘ FoÃ"َ*>w[;ph?mH@:m3z |ePv7H7ϗ:7VJ#Kw1ݰLo8q. p;6 OqA:e KASŘ! hCO +;=b2̒r]ϕ/(5j Pjͻ0W6tYJ(jE@n3+\aWu"pYCv4?Yp5~Dxae}}$JN6Q O8p[s'ѢbaiiK>kq;0]y ,@ -xO>b(]L|m1Ts"CŀN,*YwgXQGLnZcsYn1gw bR)iS.*G?LSnڑVF-T߯Ew+(HK+flA^"trr¢{OXB%Gp˲ҁ{fcTH!{V,=fR4D`JyivAgK:-`(r#dT|M֜ޚP8a8vr<|˼FY6)Z 3h@Zw*SퟞT&ɗK.zx.Q_91ם&pSkaB4&GW:TQB@\2jw,TwzDZnJ Bĕ @wr8Ž 0{z*@5}]uC@c ߹a}e.@d@Lyz]([l{Y[c%a<e,%ECo4|r=K{~EMOǑ9sA4M^d{r+,{YF3Ä ݔ54BRmjg$[Ƅ֊Y^͡+q/NymŭPS(5ẋhH|wt9kjm%vVͿ%/} 06@QS D5Ki 6Bej.Q98n$^~`tC4@3G]J$k'F_PF;ȿ tD(t[!ȱٶ0{x2Zхvѷ9NzR]]uJS`1K jC' T;I7} /-j$ַQ^!9Lc r==G{PR`6.A F䇞9Akbc"-Rev8 \{Ge7½ &g^ټM4Eɾ0vqlt|2ǟ 6o-%3aB)(;kzn^@Ѧ&F.8ٖ7/qoLߐaT"Q-OJHe>]AUssV338RR(Dp(#8 rvHcR͊ƎYq_+eHB$MnDxi8}oYiA4>_$}[M 'E#4uu$=jf3xUnAI#zcBc<z qpLs|h4?H~;mɽʺ!+=䉕ߌ cs;\^?w`DE=Hrs9]\NwԴsg5ÿTEcNŹ+“:hZIOK#AF QQ]EMݷ*d?/1%w ٲ\ZIQ3h n/Ƕx =AD^lY5Lw'H<#+5 ygFF"|Rt1VŲZMI?)3{!9>,q鋱[RsYYrfU(dWy1܀V~$&&\;"PJ'Z ̃NC^@3 m /=T`6it%R2sNl/"< z>n DЖaZoqTªP~N6.j>E}DH3^VNp4q@Ӊhu' ,OYd& vUw=y;VyReCZw Tг]k&06WpQt6שz0վbuvh찈ZHEe:B0ni7Wa|a((ޕ(CGBf"E8 O1:G4+w(5)%σJ5Սܕ$YJ)5ԋx'%QW%GFH *T *,΍1IfuPk,7d'VJIqoo:Bx~pNO2K?^Wҝ]PP=\,S%]qmoć\ch􈴒-}ג=m=>0DuLTfl7sT&J!@IcIJz)Vɣ4l$FsM=t% 'M070-[]aMLcH݂B9P/co`i+A8{~&6P}JMC)2V*g&6tyyMw7?Z2ф/9$T0Gbx츤 ":~j"ب4VVT&*eƧ>码jMn6Y#DQ[B[ $0&Z'LJAӌTi̜ I`0MpnhD 1Wu(ʑlM;ح}s-2>)맛Vfu@Ęe1GZOHfؾ!11S4}0|{}AN<#U/N)^[sy./ + (T$>[yjkCF>A^MȝHeН" V4iћ|b_]DsrZO>GV ri-Wl ReI-d^ #.l+YR2Pּb=MvK? T-_HU8US<@?yh m\S;< u»l?u. 
F}BIs yj+wˤޕ?-iox>jfldDaC>I?Gs"` '#YG/) _c>yɑ_rH]bRe~)X[)d/qI''$o+ 1ʦʛٖR?[R5| .nF)}fN[A\q1=*@X> ̘ X* "}eX̐wt4$McU(kD x?Q8LQ}.$bZv}WqdnkoZT'v0r呁/ǑUڧJ8 WJ;'*`½6Do%wFah g=DćlhS<3'Z%Q3K K'; { rNn>_Jv"AX'٘X5h{4&3Ë /6`U[^@[\APA˥\F/~"KvlA*秢`< 67i_yD.y|IދZed!Ȅ9<\Q l pʵeM=%)gwgd,Bմ1euY[EO;,pV*~u.8n2tSe4X`I4\"݅Yc&MvK[VŴ.XK8+{^By'2LwYD=S[^"vfi&o{ r03$]V`)>-KɎPGH[{Xv{[GyU9;ͣn>ё{YhֽĭoRrt~WZ :)Nuύ ܲrki g* Y9#0aDr8ŧ5 4hոU6FM@lٛTr2PLK, 3w=eZtCMn0 YZkernlab/data/spam.rda0000644000175100001440000032232312774400041014253 0ustar hornikusersBZh91AY&SY 6H7<¢TP APR"T((h@ @*p*5P`P !@B)Btŀ  "y(x dúwl%݊ =w9eJ*RJ*T T@R*4JUT{35ϤHQ@! PH R*I$$QH+$(HRJH" )!!%DEJU)HI UTRTET"%)EPUR! ETER< E%IT U(PP( h@@s m"op VzXJx=Ј=86;I:^ $@Jn@H햼M`{.{×v`z[՞8נ4s'1ב0՞ Xp8YdL@02 L #z'h1Oq@P<9+G$"SW\f{(ϻLQX-uQѾ"~G;`I.8UH\2aH +`y"a?Zdv-qzڵzVӦYG0~v{3Ew! >QhC>c;n2P|_3A|Jx[8dGբH`LTg/hD, U {^'soXEys+ga@&~!RWǸ98A?tBҸ^gt@_ \e؇匛? Nc>哂F]<{Ζ4Ge ſV䐍I)Ok^ ,EEsA=ЂF BI;b0mKn S3=JQħ8"#,2c;&P/ox H F"]݅y\  A8k3x *Y\HGɻd!8pGWl⮺|/:  ԍ+ Az8_9I7\ *H#< Bha:y*W&gc$|qC} <(poQUd9X ʧޚUP/@("2(Y9(߱F"+jhZ f{ "Ry*rFb7(AjznjEs-2O.?:wEh8eTt>u |FfsI(\RA'XD' > ǃwOjѯMP9wdI޼@BP&l%Ad"RcRh:8mijiTA5^ ӵo6ǰףO>|yq JRrZ6!4EDIBbJeꑁ0ABt>KPJ~+'>=M 3KaaEzEjs#J7+֫!AvN![4i(p1#8 @G(Jyԡxe;Gum~WJz^,2OQ|">o5NZzMъ`fpJՒZnӕ/DU߳Ǫsv~a A<ूb:HZr\@*+ ^WֆJ[ {MtA"[;IdY%az061$q2B(hy< & ~kBp-w&HJ:N4&Ŋ͏x;WrAMm;I0{(2< 1x?2N-ׂ9#v͜  j,p]ٔ4R~խE}5zڭxo[H #FlK*> +@Ew`~rhHI;$Iz/CP؎!2x^nP`]tGlx)rnˆťYn(;|Z: i]:[TR"j!"Y#q@C%_W//~<**@SCg7w&ٛP>УnMWA%k93Ϫ^ݫ{݂5qYiN1zoy XDYdR{9#i+Zqp,|+ƃx$9߯/c{߸uIВiIz]jŻx?3I$L &|-U%ӽvX-5<(Ba|YWIcLYt"gWҼ!|Y985@p2&3{,#L3!~O] UMY*X\*{eq\7j_cn/η %B|1|Jwޙ~y/℧~jU٠A7NcE@Nhu@n &LR*~ȭBH @78]1QWCC~| (Rrbc"xA?@x<ҽ?9#G]!] I2wgǩ׮R".7mmDF&CA NBodrrJo(aI(N_b1RqD%ƁsA7ko!. .\&nogg<t5m%5 'uǤr>0xS 84-p̲k3 2`7E`Q3 i^1W<,@i$Q\AXb,ЦXlX@ +U{@SRGZ X 1X)t{U.w;x;\)Sw.DňV)g&S;_/tyyz6ZIRa~fL)K#/#߽~eRgešky#LW9ZR(  \bO?"^2x>4`r@v*ENF۲L2n=yr:-bc&i翑+K,' /#fkS?dP4t~%~XT|1s>*}8XuDO;ȮlNכQ,ށ7UhC&v ߭cQk"0бF".=*V cT@q*]#J)Ig^WZje| mjm]/p72a6'! 
!12=)lh.q^=gK0!BM"_N=6txHm_zk_$lE ?F^ ZhpmnIdS~xcJDm9Z#7FaꝷnEF@vHtq.FvRr4(cQٌ*-0ԴNa{-!M1<۴{G7A~Q`2e"1;M&AAMeTIL%!E߬K̵)5`ݷf n>¬n 㐴[ `d^ '_m(Oc[CeF 2re֕ G`qTptJ{ms6钓(1r;~zv PJJܮ7׽淪,V](^<9(+"U7gB R*ܟV! f.P&}zPamdEW@S37lC4zAmlP$_}E8>{no /*P~W~lXlPk}?^5\=xnM~qX$LeIOc2.6F]i.tN,̣C-eme %Ѷ ɺx{ֲ>-s{<*rZqb($x%T9AMG'eW[>Cx mկj0^X,,${8/'6*ɰ/dNpz8kTJy:Ԝ<Ǯ/F㠔!5b)HZWi̿K:$`>%ۉley$$E:Q'<" f;d~cajkmXǯ\Vt[qw? |$ H+.熜."sbf7U0Iw^I\$LX cG=ZcPz>2U$-C1I#J *HON}0Qma$Hv⤦22lwV/N-C5wwW/ռuCg#`%B'ZpqhhH,Y6~^ug CWJ6J`#?I-h?b)=MQbN԰BE{Tφ#d bu11Hge95[_8{ٳq-^s0qd[Uv AlNNaS3F;MmAg6Sg%T&lZקr4Z^62XvLcJF!flUi"ˍFUFU%jeGuС1_Mjŋ'%P2+164:ít`$0doŁ8jÕ\ *Ty3bfdTkAZb,P՝9v^nNX06Dcoٸ YWpK#ر`ВVqۋThKnLrsT r X׾lcar}ҤZsYj%5HŭSڛ+مnkn= :X,t~>e)ӓ_·1Rs'Fx[c2˃X2m`1%aZgֿ~`Z=J`NTmݝO+6ҽf)>[լ[z57)YSc-u~53aoF}В1_3AYژ׌9sn0mŴ6s3#F<,~K zbaJu})w@IteŽS@|U3lɭkJpBhQ.:1[̟W5 1m+hfU3VF~燱1mxlY y-V5I|IRQŦ C/ M9by%`T6fMz8&}qvS4fhdp*3b#/>>=} |8vޝLͥ1\5BVv&6dmv74rD;4\+03c*ZYdK޸8'r2+eeNw_NT$im%"9f}o ;ͼKVR/s^-]Rs"Ci{(s#ga.[1a.y6-Ƕl+ |}폳;Q+v1U-0J17rn+ C|ZgЫ?^{zܚ65GW*nYsʻ+4iԒnc5}sj+R_O|V|L=})IFչ i3 [60g%`,!)ۚ^ p{!ODH+O[g6O֗ŷ‰CI0<_rg6rqeۛ$chӱ!rM7[ eO>-cW.%CۉL-j_oX>MO?%9o ?@'>Ċ?深b<̚w.26 =ɝ^zu,ۄ'YNճ έ?GhXyrh?(~Rzmƌ'i,O~4GV>27Wf7/gqg]5̷/})Oǡ+'fZ3> mM/K Ka 뻱 4̜Dl}iœ蕽SrfÓ9dMg'pTuiNE*?5!W~cw!LN>zwsϫfd ,)^ox3(R! 
̒5nT9C8Lew3r;Hsd+H4p.Vu @ᲾV6HWFUzyUeJ5M*zN#6q9)*ׯcQl#~1 ӛ%dҗƉ\EY22KrDB&M{~DOҢ[Sӕisw,N%B[`YN*.TrpO .+,F#rYEEX#4wݧz0z[L^8]mLڄ5=B&P\1C)3=Ĩ@`G EfL kvύĩmwoH iK@3.|kQ.7lܮu-ϓ.[2TΌYhXCFڑ.k#./id#;iyeBM6{gkU/ V}+!"SKU8i AC)H$: MaK⣹?o\5RZ6~?SS;{h6ppJvlYVR+a 1Gwŭ_vUaMȵPNw;4MIx.qY#fαz) ٿ`ȇ_ḯOm㝦nV б)JN\U%mih8Rg1 ι冀٦jCR41q%mBe9*wp?V?~_̝o^T,6M)~-k3H\A:Yn%X4.祐' ZJ9;?lm87nɝ+LJ_q[h8 K 3[s^9 jӪ T h"Ks)!I|ߍo-4hAS`!^ sivrxqOzkS7]e1.E &k N nṗ|l}:\~|RDWv I.'3!Վh6l76d/>^l>;_8tL%|EJHnHHN9nJ&g USuHQN+N -@^2/lފTIwpEi٣0uH {-˅Kd_-zA9)R4w)l1,,2_)k$*?tdŻ-,vY2l#g=op;,a#Չ߆nb0!]J[;BIi+g&`_S9qdaS}yģzk֓|v(j.- oaq@WpKM4uX\^Z,&QDܧ k}odH>nաeN̪c`AX-na(uƈe#Cch ” NЕ1O?|)~JMNi.&D/U 1j6f#K6w֋Ŭ/0({ !d=(Vz7FB}]^gwg:|%{4alx1yk lHH$bmM2q[>6":EUPN0I9’RuXJLȮ6~o՛1N @-A (뭎up|U '؂[<"J$Zp39|Hq @^$_Sԛ:ԯc4ZUbJrj 6&|;^Fi-C  nѽ9Srl%31OȟXHGA^M, BSI\^ɛZC"yW1mO=x%iEb Lʖx9E υ32 BeKߦ3OdL1V}!NI{enf47;+[՝sx[(ƁʒeH|va=Ƥ(j(u aծՅ H~fA6cBq+Rs3AKVq pLSѭ@{C):f-Xg*xd8.vpmՐ`.93kV"`y :ojG&!Uk$b3< S®F5sW9mlZA :3,u x63\RJ|O&^Ւ X߃3լ6EHO,0s'^4L1?sُ風=~6%xx]϶Ǥ@Yz:,frC&gG_>8ҺMFhd?;߅|3H~MhE3@g_Xȸ?eU r$eF(W&!5j-_5T8h}ן*qSyNw蠭wyw}MZ oKqmbF/nPČy_D ol( "WǗb~0rb+[ ydbdz]vBw/miSP> ";K{)ަȝcFEbP< zVh([ pLdGCk[6m6ZlCS"d6iV-l*V2,Ѻl*+>nIz^ʼY L4"icU>Q<ŦȁVYTC d{(gW_7+[IJ ~fNvPdL " )i)Tܫ.Vs*12hu4Iô̆npd 98)TeXnxۭwv%fgccNE`UUeȺ(tpruhZ$;1b^4D'g* :2P;IELqнJNs,^gv:W׆eY҆ 44ɴIE `h.qf BcLT),=ڥ]420i﷩v2mhl bCۭWF^Ժ#Vn$~6vJ'Yϟċ_9&4x&L,Bɔ 5#Dô@3:/u7i^iM0-`Kn)DМPnzzn|}F7>Sziq !ٟwPV=>t<|9QՎ&0B5R ԘȥP!ɕK7ȼ,7TeJsn0" `V)hnq}_7Gbn zZѢM"-FI)@AI$KW;Ұ;[ʍRYZĔĎ-E2(@ K5xޯCS_'ɟĆdbG' ϼZ \,]IY>EbJ-}qv+4I2ri"D:.:B '']֚@j-%OPُ"=IEB߯NH'3}+j;LIn͞<% FAk &6ۻ,8~򾞯n9ƹ%e rKN^¸ Nq=\lF~#6H=92_&܊d\,9E%b&`^˻ Qh,6[_8d7wrq blj3H:oσxB9(lHFv6Onfݺ[I[ok߳Q:̄6!/qW/xwoNmNiMaAFޭ *j`FhUT-|D4&X-^>7J**l " :z`0YzA>]^,SL{;  KfY u܈Z}֚>?0MC *]$jn{(YV)oU"3W͛5mJVX X\<'4n) dȐ8A Z^hTcf&O!lhN̵ ֳ'Ƣ!kcU=߱ã=%U o:s]BEka2Xƕa80Eo5c/F^cKc, t'0xqC\_SؤCR 0E:OuAW N4& YEdo*z!FScSr*}z"Ci*qH cC(3d7k -t〹X*\8~/$@cky $Ɏl8AuM[vcx%"PfP jԁ_ ƪY{vPX]}+ | A`6Zn6Ӵ6$&Lł 9reˊL9T+53f]( I]IJ,d 4 
B5ŰY1l'"bM(VЂ!-%|6dn#JZPc*`LEK|+萆b6x)LKTo8?Y)4N97ozd`ŋ/_uS 6_H{ൺl%te3FLjɃ1H %`ժ ՝=wL Wn«cU!lbܫ+bdVV[XLh:sbu3s4:`_?Fy.vV|/0 nں^ӲW(%z*7?F ))1D4h Xe]/|g"~5*ÖK$|8֌" ;ċjC^vc9} oȧɭ} G!0wMLc'yT &5\sh;6.-<F3:e*N1ː38!aG]3;Vk9k\; @_^c}nzuju(L8l33Ij(E L2QNx-'Ap*&h#W;)seleGvCϘ yT),jZ܊z9!z@g4D,Q .0 A = A4|ՖdL@9fJ5Zќ\z~lG 1d<6FmrpPK[*\M'IAmYfkW 4 #=t#wsc"|<)MޗVhcƆOb{!9ԙڸiFwƦ^ @؃5\sVTm<^-k &IMHOGesuvkβjAI2}/KM/>CnTђS4onOM. eh)_o4wGw_IOXi)ekHmvyl"Y43/\zQehhu< He[_(Br7Zͩ6\ -g9$%EG̷w~Ktc1벽^bxOtCZgxkߎYjfq/ӛ.JeUԡt1jL,J_ -\YENfNJPJ7":3't P[F8[Uİ#c{Я' ME5[WJ[}vs.JabM`Şda'9 ˱3N{RhEmpиB8$4i=- T~oQZP[']A^ [#A~ʞ5N(duV>89O@eN*y| /JV0W鵩D0kdɭdGr,Põp\#y4Kf&#Rι76V.'Ea֌e~?PEG;+&•SGhtu+-|2Lh?DeOȞ5CE%fZ#\&̎@Fk |(_El HSYQN&,'*2Ε[Ww:u8WzIQxz޶ 7,)1j(*RB,df &#x[7g8D,)4B +5S , aK@gYe y<Wi?`?_A_4k|j|-@tdizVfd4H!-/1 ERN%XJe-V^-ɸ. ~G}o??/0 DǢBKGd>vy ~ o{!Ww_X1%цv^fU[jJ zz@]/ͤE6H{Pm )T\|'l*n iqQA7XP F@O"3bm={h1vs2ul9wj(ӤT&XE 0 1 g1"-W:]{]TXHuu|\o ;y).T<Εe*-Cy31zјh bݝ*ty;T&˟ͼߑiAR- 7J ik2a!0FBO&R!Ձ~,ss6c˯uİNH3b}1or 28k9 ,*ˈfDZMZ*su֐, p F'hMsBDRz}-XLqrq4%H*+JJ4_7]՛G!׮6Gpp `ŝS '7e4{ԲX|~22ݸR6VRZ ګ͕Tt(nsIe+97-;k (RӊWS(p|MA~ܶfdh;J2kjuF? PWɈny1]*!f}Ԫٲ*V2H}k@E#,UNC3A= >f#-Հ.~UݹYN(YZEX~;Q~AoGv0AV-,d >)pr+CZhgx>x/",Z( {}p}/T$n4 ไܡcQaAV}dͷ`h %ꀲ8s!2oV4VJJ`r~u2-dnAD'V5-z*qU d^%]X_똹,"MdhK+GN~"n)/m_Btp[,2M`^'ki:/rn6V Vv16Bŀb ј^p^7\0in!0T@H"!EZda ́$z?ggTﳋ{t\V!meŅX-"oCSO0KI):0+9NC3ئ$V"y}~Ĉ)yQ[?W͠hbda7d x]Y}'<ʝcl-!NجmvpܹP6xe5ypэf8U>xyp:1d+־LY']8[1VIkR[6kZNgȥ,V}k4Q?$,cMGz9Put!ZkkOvNW)(y$epgV/vb .QaEqUɖCv=9䚴r{WZ/'bUP¾ "`8q(o|-˧] tn_SZY]ZgVe3RO2(Ωǐc},1q QyB֫[=Jx-nx?!q:~rII2)(ݕUuup*ZꑽH┵<1֬QE4+^q>Xֺb&2«*׿tRnUV&zQP) -z&\ )4AU}o ȀD1N&RcY1"k&|1뻮rR tXt UexXFmN6qYHZ# LX4k&lI1}؊ 4eS,T /J2N # ?aHa+0ix,9,ҽ/\f77]R,X9hML~B<7!љ>'¨{#ZKttm!llϞq7Z s)7ЖPqEzS+7wg'i4Z6SC: A\p^ <[ZM)ԟX]%H,(ãQfb9~|J#@5>FHJӠ|g,pW=MСzd1Yn\sjvX.e)З:6&@ȅtNN7( \}Gp2kDini%u~<%*tښ .֐%*-S)lZ)+4KW4sqhʌ"}PkA\X4(.t|R .SMP޻ ,b5}A3BbIRo4<..[e{0ܽsL D0o5T%rBx3$037 *7&w  S: 2Z }QŕZDW-{㊜W $.\F-M͵gL %JPSb榰o " 9AJ CŃt! 
*O~QEɳ&n3ŵH L&ɩ h MxgvL&%H'r wx17H2Csp!:9EdrfgRq2r0Q- 4C݂8 {{plC# % znrӓ:9u(Vm.F+\S(7Ҳ| ,6!p:3gу&np(ӱY,;V| 9$SbgnƓ#"sg| ִdQ*l3}scxarj*qbqRmTl_-0U|p ;/`g\DAVd1[!&ĩxE UcsBU$dus/J&S"[`k |[sߧ&SɽjecFwS#kPQH_%Em=a~W7ScV9M4hlj(%=i ުdk/p2)EpF/dHXQ z-WX';BBpp$Ʀ2+R%.' Q.9`p* WFMwe¦MSIm༓AB$|-V]Je-9wL5?V {!N;h_3m3I @vNʳc[ءuB$}†^m.SWѩ"5De OYr͵#%!HY \1XV mA2CJA 7Ao ¶7 V jk^|3о&P76FRDxln$jXOvO6~/˸Z}FQdVL/*B e>Y-,is)"ψFW"w1iÅn *r ĝb^xLVJ- X?4P?{97O~⚤f4;O LnAG̅'WꛟbPt7ޘ`Zf ҋ4D.UJ쥷%E*NB_rWKsЋFc%2I6lx#2瘀a(v 2DCzh~`f܈6M > HZ7HU&rn[k 8Dj#+*#l88lwԩ/m|Br~Z?JN|ŷMחߩ&K*\Nt 0 "0G;{^j%$PYc^0)ƫai j $CWGCutGuqn[Z.c3۬y쇔ԊHuTdt+.k'v6l2[S.T"$Q&YWK0g#osnO||i̡תIc^:&LW8,A~ :ZMM+a4JhV>ϸ~`S<-JPApb4i( `y0milj¨\9w!/8Ó Fe4i0-gQ͛938cC6;% yH2vٱH<(:V'rc ߌgUν<*Rp{BN!f'wCﺢ0A?h{{I;<|36:T7mhf k& -JM pG ]v0+wa4 Uis)Ҍ[ەJLl%\.6ާx{ \ . {tz>3oԇӺbu"\ch2@F.R1; kf>y*~өPjҸLKR_Y֕V!2‘\%U 1gŷX\|:#'{(]pFDƩFE5-O,G/$ QLAd%\.w>6-kԖT8p711X}j:@5a^S 8~-=ׁޕl4Xq ť"<.AXJӯIJXLxΧ8rn{!5p"p]j%du!$$8,|[ւ 0# Hu?ߵt4(| ]B]Or k-zm׵ Zο}0l{n-{Unnn`8([됚L]wŢ:<,\ bCTsNpI]u#|!WUn n@D.xY.]׭g\ɽAs4\3#k,II'DӰ ~뱠\|,]po=/`{wVPB0Muv]vcSP8 -]//{ԆvU c~3U쮌J[QnW,lݿW]袁$.$'QH. 
_\Njr@\gQpf50!RP70и45Y9[vN  Mv7@4#mV×ەǨ2KS[vt8²olu_"c:'~+x[u\1H .Xq$"0Ў% B 2epGFr wa ՞Ho f Q5!Q}UzƎv߲|UvqdG㡆B9#"g.pρl]U2JPLS2PJ̠ Jvφё "_UHцN)3NC>!3bPˆ#2ѡgLy"bA5Jt& ؎9*fǣNA˙/d* P&]]E gR,ѣUJPn,W=JY@THp!"E/E@ ,PV̪@(`"*˽r,/I&PM*(&aPEA"fs߆=?\ SgP6@P3$ "Aتk=o?#|9sϓ߲{=g=@PzֵkZֵ0\1CIa(Soתő=ά&h P( B(6ڨZkf-YKVmmZիM)$4!Vl13UZlVX5kZ5SVX6*mjRMm-lTmkElZe&QdWaju*(JImj+VmdTVfRgHmTIfVkflRhjر֦jڕZZh*I%wZՍUU6TEAPPl-@/@"0!"Hۻ;AMDFn$,QL*!,Ci42Le 2ĒfdLiؔ4FW\3^9Ȃnqˠ2iB51 ,̈BHpƛ3IM3BHbH "^]rP ,"Ăa shW:$lDQHb J(yc!S3" wEo;ASdw\$P1bw\L e(뽆VwoW.-uxcFBU-ip:U #P!0E@5!# *qۼH<2s9]7Eί(aO2Nwwxw]ywܸy*˧^/3"<:7sq!:tsytuwqsvl&f:NGxΛtuȝwwgq;syj]yxx.\ܚr|%"$-WB|pI?]hJ;(kW4]U J}]RZo5ɰzOO=-b* ("cUƶUZET[Xj-mEZV+jmQVUmcmmmj5QmmZ[ik$#j֙UݶU+mWi&X|~+>ުЩUD;EYxn_~p^˾BgHH Sd EU"A[H6"@"غ~Ow誷̎AުB# H*!?u bnv&+cdvS̬OӔy?SKX;)RAD5 wX(Q]T"vՂ䈌(Q#}4:* }j-[G%73h,x7Ix\,i:뼹FƴB`׆)_s sټ(fY>飦H|СZ#s7BdsOT+ܫ%LskNooc]jN@l!ǾoS P҉ 2,@ * UT4#"0AgK!]>k~N5'qEYD\_k`5ߍk3t|*.zN3RTI׍rVR .e+Y 0Rxj ;g/+vK7jgyaƎ"0 \v ov{By hDPa%Kֲ1ܭ- 5KXBww֟~ ( Λѭ X9%mo$`Pvdqªo$[&EoG2phc4Qfw]"W}\ۜ [Yke󕙢(=A&iQ o B$lpkADi(klnŀϫ.PSIX#Eie?1b0pD2"!/c}Q4<\q۽wb , ^VԜ7ǩ}4yEXXp 5ڐa {U$Qc]W|0(/,!<^;F/H6#;[}Y( 1,*.A SF*S`&PAc}v{̡)?xfUs77J828C.@*-Jz2`)B\%JyEDD&Kt1&Rj l! s,D٭CK_ Wߕpz+;&Df46,["6jQ$ʣj4"5f(K$# kEDX%%6(Œ h(`KX"lFcFF@di2BM4hѠ(cD--@j#%"Fhh(e|I@RŽ*]w*l*TF@ Umhѭkj*EVTkEZmTjZTjMZ-m+bh(ڋEFjmkX_T>,:B@t@-Q(?>t{[7Yw|*  *6ʖP #_NN\IRF/3W?WkSQ>TরC8"RT_>QB=swܭV2+/1jE01rZoa}?W]~&ˆe]ʶgauQvD2AT:Q`0$a## "'g7VtQiHB7HR6ƚ$! 
AQtEEQIAŒ2 y濦Mf[eoIuV@uGBkT|[Jt Ȉ?r e6ō-mJeS 0Fj1XX1Y$%v}o|wqb.FD"QUv<^.B?a+6;+c4w8"&`~7DP5]2,{x;[ۜĂAE+cTj[ZsXZQƨ`lV-QcPmQ2Hأ mh,XQTQbV(JE5hUŨ1h$jbl"*6IDd4Tb5D[Ih֊J4I`EXA65%EP #X5FK(4h6M[Tlj,,jɱ4U&e6EQP64hIIY$5b1(PEfXccb+ Bj66Ʉf aL#c3IP`XHbDZ mJ2iH2  P bUZjMZ(hkcTZQE[-~~=痾;}e;+ 2rTFФJ ћ!"MV|7PZ9r+5k؂Fx,e__}W9Av+)4ьwTBlC!=TNJ^=d3|r]VHdJ-"jQV "YDYXTj"QT"AV7X69`IvgY 9 "6պݻ[żĄ~hYr,TN>~s9,jkF{:#tAy-g}yu+;פȀՂC8e Xc5ø~GŕSDҪI5VUdX\!FyWi>Uf}mۏC{-q(0$Bih /RJ|& PL" Up]6sq^2@0{q _*,= N 'Q62-25220*\P-+CtVck HZmQH 貕 A bPPPpH q*o?jT]SnCU?:B?92#%m*͆DAPd(i͸ZW@Wэx@e@DD\H Pp{ *zo:K'e}r7P "&U<{n%ic-P]G*7@-VnͰG+<ۧ_;1hff  fъ Y[ozZZo@=+6`DoRL,M5>b$,Wyf3h oZ-jeWΕH!!R&Nl؊z~Љ$/UeZVD@C( QB.$Q<ښȱn'w|帾cuȨ&#L`E(k0ݭn[ܣ9TE;pv9)s8%F@Ln$auĠA;L2> HX(ƖߛUzJ"|N Y)f:4?X&/-X XŐ (pDA(ȈiVq#*jGuq)qp0JEJrC3)aZ( 8; `Mq iwA\`$KVfGyȈ0T+ VܦN-$ۣr`Cx2n,Hp <XF";k"[c'.f3G`ܼ¥Lc@$tWReR * fVG>s}|TyWjkS9R T<=$%{Z B.GƤgғ@*,DdP:9|ݭݞhj޻r-lYdcTAL1~e8ep]Sп̳9 $%V0˛ۤ2,WV5N:V,hQ+͏Y:8%zla=}pl9RS!|6V=laEc8S8W(Pl!PT40ג`QQ ID#~Xs+XϘw1bpp^a xCO3~} 7bهw,ߍ$XǬ'J^v(P.]+y56YLX[h 1g^"7N r;YC*Yv/zCX[4B(bu^80z#> qf|1ۊ6I8\t93u{F:7oyy%x*ЪJVy3hyA (TF)Y4ݠT;kN$3=qb 44)?V%$8HZZyH23vЯ\$_تo,}D/3&2벪ǝ/OLj9IPfk*hw,M߷:8p7gsH(3zx ,^#:EN={ÈzDW :e ^uUZR匛8Z"9a9ⱨ{;rAy.3; 9[Ϛ㨯RfTޕT;{s{SĜNaE {TCc>BO˜"l}.T׳|38vH9 s @6O)xaIҾizd!LYY4QMdZФ?Jj 5L5=x62}cH|p4Pa1kGU0R)Y5nU56Q HH_k?qB w{(Cr|y>)R-[r[i~yKőktං2! 
"W wL[(1"?A>T5.8YnZ P)D4Pfkfl'\uZ*2U|w*>]{ݞ2F4ILRU]~!# a'')uP~fĵBEuCOq]*$KBX KJS>+?\$H2I<82 IUk'G܉_ܟOPPyFop|~cA&,y{g O_9wg{=g|ϐYb};]#:ڦ]E]} Ø/'[`o1A ~|O>\>噙ֲI IH0$A$ $`I$I$I =u]kZֵyw޺33]ky0$A$ $`I$I$ffkfffkw}33uuuu$I IH0$A$fg}`t0$A$ $`I$I$I IH0$AԒg뭀fw $`I$I$I I35ֳ335ֳ335ֻ{fZZZ̒A$ $`I$I$I Y]fzw}$`I$I$I IH0$A$ $`I$3=;fw $`I$I$I IH0$A$ $`u$뮺{w޺ffffIH0$A$ $`I$ZZ33ӾfgzYYYI$I$I IH0$3]k}ֺZ^>G}ϟ>|3>gk_}oֳ33H0$A$ $`I$Luuu3=;fw$I$I IH0$Aֳ335֮fzw}<ϟ>|}I'CI$I A$ $`I$I$I IRfzw};XI IH0$A$ $`I$I$I I3]u`33uu̒ $`I$I$I I5fg}`ֳ335ֳ335ֳ335ВIH0$A$ $`I&fy}I:I IH0$A$ $`I$I$I I3]w33uH0$A$ $`I$333]k333]k333]kNyfffkfffkI$I$I IH0$ffffy}{I:I IH0$A$ $`I$I$I }u^k]k{=>|ϐxu]uֹo}}}}}x=uu$I IH0$A$fg}`ֳ335ֳ335ֳ335$ $`I$I$I ffy}7$ $`I$I$I IH0$A$ Ԏg<ϟ>|Z뮺w# $`I$I%c1~wކyk`N\cPRB/ªȂtUB@hb<$cm: xb(5(t!"wX8Low~!Gwa-sbczX| c׶{$ wv-q!:P/}˛ɭq`×tYdtjKߦ+H!"9`88I ;wG{^+jNo㬆Z*+ӕJ%HN~w=;^';żt{}!p%)JRd $`I$I$9$7ߛ`;]k333]k$ $`I$I$I I5ֳ335ֳ335ֻw޺ffffffff`IH0$A$ $`Ifg}`NH0$A$ $`I$I$I IH0:L};]k2IH0$A$ $`I$I$I IRI뮷{yfffkfffkI$I IH0$A$uu3=;{Ԓt0$A$ $`I$I$I IH0$A`|?;AュkZֵkZր|ϟ>@ֳ335ֳ2H0$A$ $`I$I$ffffffy}3;ZZZBI$I=v0. $`I$I^fgw}t$ $`I$I$I IH0$A$ $`u$w޺fIH0$A$ $`I$I$I IIY뮺{{~fffkfffk$ $`UUH0$A$ $ZZ33Ӿ{Ԓt0$A$ $`I$I$I IH0$Afg}`̝ $UUUUUUUUUUUUUUUUUUU]VN337UUUUUUUUUUUUUUUUUUUUUUUU337uUUUUUUUUUUUUUUUUUUU\U_<337{UUUUUUUUUUUUUUUUUUwǯZ}}}{?>|,{{{{{{79ps33{{{{{{{s{{{{{{s{$UUUUUUUUUUUUUUUUUU[8${sJ{s33*9fUUUUUUUUUr9sUUUUUUUUUʼx9feUUUUUUUUW*89UUUUUUUU\ǎsfUUUUUUUUUr9sUUUUUUUUUʼx9feUUUUUUUUW*89UUUUUUUU\ǎsfUUUUUUUUUr9sUUUUMmmw'$3C_HP!JB1+wi/01,wf߽LD݈}I?/?Gu|||o? P2xsʪU9ps3*W9̪^&K(rETVcQjگg/~y"RpNAH p*Ba-}/!N?qwy\uao=TnWN " 7cR*"tXƶKV+c[hYIQDu>}; ?QD> X81@f@ꀓf>e ^p8H @r!JDɿyK4/kr DlkxD#,º` }(VK>txLw:bDYo@ͽ}g|qyXR@PB QWV2ML6V"P =e;[OyhDVP6?cR*t+#x8 2c}V]rBn5pư\D{kkv 2좨/р$D(ch@ !H)CrL!WB?Q)+EKً(S$:Si^z9ij#kbZZQV7]e& ]]h" ~s*yJZN6=]AQ䣪\7>}uyPOh(&E=N솊{Q2&s?(A3R=-Mg 8kۻ<0܏t$ΒOh}c1 ;_!`1Ն}(gM02 oRyYTx(\Gv!\˝P\+5_܋&0ɼL`Gpu#me;1uaS{ e>o-{q yi%(\ S%Wl,RO|yٝg>kҋĨ,PtoGݫ/^8LyAzb 2PoHSAm! 
?ar0D'^k8Z3܃6];_5&rt8?_MA^T~o/kgNs1 v1X@ M\&+lLrE;t]|ǠĄ<LV b|+{?EGL#.y2ID?b0}?e`U⼿# c.CI#Í#hpGJu7q1ҥbgGzGT<(Y5NzCb<282qt&(D';|/iͪ ڑҤ&a{o)cc2@H 'ƀ`Z^9 jṻ- TM#][2w!(aHLA8~*1vxUcĠ"[pFPw3yΕɌS+^<0ROM꓌zF?pE _'ͭ8oI8Ϯtai]K60iooe,Έ?ЧQ5ӹAꎉ37yxxt38Ib= ((Ȅ"e!T/U#M;$!#է*{V4X)1F ifa'.>ؚ13fYM"wocx?%vs&tS̞ko!oOrjna{&kdU{j{ךz-[ ɶ ȷ !Nf3`3xnI?f^3iq=k=P_9VGR@WXn2V;:ZjՖK7zxqZEuJz|*/ˉkPa|7C{Q`}q1O.g 'DH@\Ib37gG&~ S 1dH#Vb c#Jmn:! ׈ѬJ5M4HGXR6.TtS21:päE?bݙ>(F{k-2E gcI@,/⤐ċo@ G$hM; iq/ ($έQp)gjNAҬ3/f2$$Hzxzn%.DG$މza1G?=N䉿ġ5I= +RA@r؏PڷЦ`$&oye0<! _c-S׬AH`Kђg5y(ڟZL\I ~%]SG(n*2NntvV~՚Q? VM }eS xG/Aw' "S'n4IRȓ6Ley$6T%X]=)L2/H \މ>v9lUj H .߶|_IpHupqмHָY9u.nקk5Omkd [^Wg2t>vr1b@tezCit01 I$]ѹJe%ϯwݺ[khFR!" ,7n\2. FC ü(ǽ0KxJ/߯!*Q\3(u l1sL4ma[vs_BS1:JjF'vWLyvU/& Q:0 +#du-] 4B(!Ro-Wm3nE>rFЧ%eoe|aMBTiV4xEr 73L۟)T^,l+leJG 7xx3x_VPkUJy36/H2`7^^Hc ~<ʏ$psKDH"s 0oK^S yp5i뙧;QR񽴪QJ^ۻ nk[$aŋ +vG>לL*SaHoTܲE4✄!ƌ{ܖ[{#W0uut-gb;b &D;+W>c|߶YZ &Wnb@j;?Iz_$vTpZF"F%|BR QCuŦs[;} }Mbc QjnS:4W^eW6Mz$M-W'd@3Mzvv;c#-좵T/VTg?Ѹ~- WA,tS/9g|1H&A]OCr) K?sbɤ#hhe0kp<1w ]J&!$T7p2P }bp29Ziz♒IJr1/l 1&B?(sTlB,nIJV/vOMZM pRIV-~pElFba7zc<`C_WWU-GtF*l4|ؒ%~ _4r]+|R~4aGm]8-VKZ7ۗ(&*v#ܗ%hק?5Ӵq=q%4,J} h߯ʍZ9U[_0H[vL x~/O,(#EZzwhu+s93j>ۂ=PF?}_Z_ΕdՔ/2a$M)/">(Hrϊ7'§?ؙ<#![\ǻ!VCmrY?y  k R.!ė P ~u*Hzo/7 &$qͿQ`ШSϼ܀oj_i D\778I ϕђ"~X!}_닃E81Lna}hvGKcC&թ#EJ]$߆)u(d:L/s Yy^ZG4am` /i>-(X|#6 @PeHS99;=jM7Y=q_TҐ/ի &OIlpŽ }jK{b~tKDt"4^ù#f0r|fͮh,puWyJ-"nSCGB|PD-GaBhT2HC[&dP9 !56ou| }3Y{P-iRdyqݦPLy! #7&LR8$o?v٘Oi|) Pc>4' S0/ܟCWM8_m3E>m;ÞPnS 56\Cr_% 1~==!:me eDTF_AI6Hw_Mݤ B8AbP+~+5}٪=6Z5F2[0/A]PGdD6y]Ԓ^8_gʃ\$RN5 VJiH͢*&^}z2gcuˠɬӐN#,RrhAj)d嬆8 Es@Z"L=[%&: Ύz IRG1QB ` ق; Y}(?s}n, y}{|Ne\z96x0Dњv`@GQ>돝ooxGo CoRC_+i|1ΚP<w-.m+OEXP:O JG<@^+X4gvJ) /;YnYݲ%"ٵ5r2RC=;M#h`B$el>;).b)q2$AQ١rj 0h8{C[-aOW"y"JK0AH8XJ2'`=ѩ1($ĺN1_٥OFc?\S\*øs@ՎKJdWGrL,FI|ZnQwPHt_ky~ˈ |@L3g#t!wMpAw>f{Fom,C7$b :GlPgV科+2~'_ۜ*x/M]ol50Ssu-F>_\.? 
2@/4@g/OdߗzIQmOqlhtŏɟ|=([5V 2Yew}hhH3 ddFq$9hU#Bu?{%?Oa,[1o0`vs~%K$ZTd5Q|H{wKmlc1x% Ta$8\R& j ##| '=s:$7ᔾv2z"OrK}s:-֍Y8:׊g){ٝCɏ) Gf-dU Ftٸ`C"w}^n9~cr7,Z÷9͎Cyn'Lɡ{T$o13΂Zp+/͖m1g՝PlI\Tk6W\.ĶBGRRtަ ^F%~P!/I?oL;H;7 ytoGlȷ>8>hxHHG0]7cgPA ,5gX_*T.o0*PK.ù*vw/w|?31[6,d ~05.ZYDs`SUιB4ܣ sKldњs?,9g7y YPM:`5(fFA9$dB>uW=Z wF* z'(.gNPUssjߗ4-NlNhܷ kdK΄㨸an#ȟ>`'Ɣ`( 5 fW4}0+8#86r=[ZC7>CU!9m3$J l.{zT]߻G#jDyZt͵>8r8Yi!?lCҚWėח-e|Nbfs i?=Gw9 ̍Y}<^˩8}(8c3wD tQs?FmfvZr̝4߂aRɧpq?c;]3b[ڡԅiri'MW.$7HfڧdcKogNAD>eG`b!1-R \Ұf|i:\ح|͠s?MҒr*F%7^5ޔ&ka?0$Y2* $E@07M|X `~(0sug6-VTz~agQC͝z,@&0kը9)d @$pOQ f`r0X/`\ݣY>\L?i/`h)^H6b͊g4!>7KVTJeKg=g6̐4Ib~l,QnȫLm>NR1n8KBGldCL= -*hTp p DgsG23/y X~3o7K@+D@ZnqnF޽:o5gS:0 X+SDu{^Cܧ `O/<$/ {Ɩf:=&fPM0OKl>'3^1~_U>U<s۬~rqf9黽d>N$eP\Y2xh:y,i) 3.A0O{0CL$7㽀1$(׉<0STWx1q_ZZ},` `KjNiu,~˜03|-k۹#r^ݪD3HTlA? =0p4/K=-G֫D6WoG_y"s<#z>yM/ʦѺ7 vBi"tGC {<g ; C_ +bQ&s\cM'Ib\xLF`J+]+Zd(F- f8(N-~i:J;Mbe? ZTc2F$o 5i&)E_01Cq~oSIE WZEG(FZΕN=-k^fj̩oc-=~{kqv[2$qu= tΊ\1H{pË_a2B9$O 5"b(8|Xj_7FsW w//X%qd= L&$Kr 6%j "LqD!W1wH(Rnn$\g룴j-O>^ڛ'KxsLHLPf.nRMq_~pO;᜕cp'_&u|Ucs#LґgEzx(wQ.I!g>g%ܤ^(2a칕cdV̌hj5{RU?^ΫDG}(*raUفAY["XpZ|;ۿYo<$Ay%6Rx$0sDB~\%pk7ZuL!Ȍv!)f dj:ϝ}yLMtI {|U%`o ?q56&ryԑxB; enzy$tgcI\rE. !lkO>3NKuSq&z#OئU ӴzbYyCU?a@=Ff. A,sGrMRGi."BX#@x 7H&fl|OD>7S{<5k#./1 CAS|^rՎ]#}ӸpENhyzDrDKڎG!.weEC*UM^}qRV yD ŠQyP3/ͮN$Nuib;_s??e>&COJ>.)(+Ĩ}+ cUƇUF?.3R4+,\6+ʳ>lQ%{@a}X]"qPؗS2Cq4B|TZ .'.Qe[Wh*85a4Mٜ*0Xjʟd f=Nױ]GXO,x~7G|jEq#x>aH{?' uR0cC;#{ x%,-QBw4q/*{++D9路KG^QO OaڍO6O@ac1y(@`yeLRh| STf6;JCBýu&Lw -=hI_L 6!g%Ʀ)1pCܵw%^u"uRD?{ЅwAg(V %!W* e~WkLEv!J$xMp W RQ4\&{$0ц8'g4@orOH4a=VH !5rO5(D9 t[bu@<_ xO+zSQS1Wx򼔜6yu'oɞYq~-Z#xA&Y+ '\|ͨ" íeTd7߃Cۋ`+.$J3QAT93LaDw'$\x:&"!@j]/q()$h] =əLM(t#=?bwYǂ{ٛ˕Y?e{O~.gqoe0-ɵϸ|a- I6$g1͡8X`"nd8xdK)rЀ2&25;1']u6S3̀f`q]ц+aSY@ɲp`"_&F_o@DŝڂĮ6|f h Yٔ؇\{7! ~(4(rGcMhbެ NGy)X/\~RˊnW)kG֌YĽ. 
$t6汴pXi(*gVM vg]tԬIlNYCՅ.:j:ջ> qCS|LضCFݣ_;i#Y:Jx쀣K<.HsdMzh9Awxv؏ˋr.WNJ>'麗_a^>WcQVtq+͌EpeH>V#f*Hh :IWLdkb$A!O‡Jj ̍`0CEY_kb" e!Q;z~{ue FUJ9qɋ zEPphShㅽ].$"=3A1$fnC%%#@eʡd[21r ';;A]ŘwüK&]{fyb|&xGP?r19L}^%& `oӑBHGi'J ^E G$6uߠ@L(5 f/6 BIV9v)f=|b0%5JVJ_Ϣ9|+zLD3= *C\DVǶH. B '}8)>LyA'^z(S^h$t."QzF j VW+*[=Wnq&]y-mr|^ 8N e/Ս?K1lBխϸ4CL&?`S"!ew7 'h"4 T>W3|:M W G,#KBQ#y֧vӀ%>q D uj؝uui߄t&XА\A3$}ǷQぺBwr  ';o$ybf{[Bq;LQ {VY,3nɵ/^N\F:UuȝLjLfJ`J|FꞬZDZCLn^2~yg/7bzK,G4 r q>o}8H?g,|GkfeaKx C=9nc*=5|8J#:H2Nwr̄Ǡt[dWZ #h4!|5OZtS\VkQy`?JM Q\]Jeqok+^g([Pj;DW@*w<_V!錺}B"7[z{BD6ʮi)ׂnH.m^~ť55J[^_\$jz"ӼOɥB:'Qxw"qшu/[I@\O\LIb<]4 a`!++.挆ZXb(9}g|]'!=Ef: ;uMXLOe2/-ll&v@ڔ3<ݸt <]MЌ:%ygKԠ<.W!A/>_(l ߱ȇ;[ʇ俭7sWfӽlĄjBJ\9h`Y}  !5/H/;åbY-hBtte6_|nDO?5j@hlp9'4[+4~ÉoŋtlPvuҪ2ĕQq!\g 0b,$pA)NJ@QVRO(kpD:ā^qfCgW)Z ?1:b.A>C Xfh垬CUiz r󇊘ŷ[t!xaGQ o"#j$up4 PXПfܮzbAȶ\ " yԘj(@TD Ɇwj\} fAnFT 3LԽD,MpP򼄵]_c:*0 Rr4_?e,=q=N%߇RJ[gN=:׏T Ws'br[CRb5ќК̲y( p+(񲏩kp$E},YN@R2'=> t1K f״)@yy3Oe@akOb^"T$hu x@)`CZ Ih a?$ 83vӨE-##zu#|mOr@NJշ]K6|KyR3 t0.S+vV^O*o^62a1`z{,x'x`\BNB$8b&=N~odF)DXpLI9j!֎aCE1).yҳ49]e36{Q)¾|"D5u% sL+X#+fBOigDvI}i<M. ~$~k(#ZX.f"t+McN_ o0wn3oX3{g`NA3^W9̓=RL @Ɣ!.s΂\$kϏt P'n'Ӑ*]=.6 EDW9IJϻzO14ʽr%=OƑGdN=*J0OaJTnnmbxͺ+ !F!|I#rpK=(4-9_AZLv%~ ze.21ebYjTngr%#I>ih=3`sӉ}r8^vR_aLM\׼N#ɆolǼݢ+@ՁT;He^0x/tlQν%z| 5#Q]<6+Z pf 0Рn7!$dhyk@BBД]Ҽ#YsɪdP1LER8@X| bl/\;9h!o؃j1[si#X@XnODŘT,FB;Jym~d7ϳh?fh8 rq.%(~KeWaACAdtHʶ5*I8~pK)H Y̕s$n!a~˛g77\{VQy3q$ )M|q$ۮ_-"G#ą (T(utC4;Kq&]"}\b0httM }ς] ,,ѬfPtY6g`{}}۲w1咸Чx2gNAy>' Hv[+;`JMj|JuI'P!vVqH JǵOZ*Įe6[%Jyqa6w5GXr7=M0z\,a5ify{}25);3]S5fź x)Ā۱vJ$ Ip DYV8*?JڃXBWi -A`C ~Go֮6P1Wįh>MP րsG QܟE=乼f-j>@x , ,Ó  HڥT{0\7ۓ8I:Ϯ) 2> ̆좰!Br螀*ʋO:@pt,)q~`bW 6I Qp7u/G+~}ߵfDs,q8"cUP[\›ÙS_6[B8Jl]`eE QT3`{&AȨfw1S^I+\c~>`"B=cD' $9/8F.. 
,YЀ[jVx L @wW1Q7XZv?uKOw= |Ztॎ Q .rj) +"D-ҘI5 ٪fV.z xō-v DLӂB0_l Rw{ϯ Ct'[@'(VC1`0<;g\ˋFfOJdA[77ho7J =f(&% 9(+ *"W$d*QjW/bGDzf;H$%Cz׀.ܻ,W^-=h<}]81摈Knr7 ˏ!.')$kLw# pA'4xHΝməA'y}&Q>,W#"#jjX>2RP!pl=YGg0i !?s0Abǖ2Ϙюծoúջ 3CxǷ=8CgvFC( 簎/`jHoĥ<ϙQf^3:Kho̭3w:s'/6" UA6JC1k O;eL +ܝrvooC75W S: ^o2apc LW) vuC*Jsf_Kȭq7b FiX7ve<"gH*25T\)% ~˙Kgc7L2fg))T(X@~Z|d㻀2BF $9r贎\W}+P 9 xY̎>%A"lGD&"#lCxE.Xk2\2bGzNZg\=7Jb:Ü<.e Â0Y6lo)dOCza](.o*\KVI-HbUz0|a)<ɴ89ygK* C3VtI$d[b7)0L~}#L4pL_xH~C'VݹiOƆ$FUu)ڄsNiа?ɬ1c4N $B*GMG/Bz -*E9>%kDUۤs*jJD{֙,` mVӂ7ٌC3&gq%&eeu>oK~HŒ/L.T촡(1Sb4Ӛ+kϿPö`i~ LGb~6 OUyi6Q.@~k5yCx/9I+)3i8.1}I6EAN"JA[Ʃjryq|RSjVI\e3%8UpVo[_l^Hd%VFta.W~QQ4o-w/U`GEЍ J "dA$1&`,ݴDB6iΣƈws!ۯ +F}KSY\$Uȁ٣*h iJGffV T8(:xk^pL%8|O-U 6LZՋ0J~yNZc2٤q2:Xb4*2׊33=}<`AȖ[7\j;VqJˀCN R4|r9M,K?A5ꙛ@. JgR.je1CϜUH)#.@ kzYj@" \]ZX9gPc=>LaүJjG3F\t-wbrKW&RI[r6J t9ӎLZCћJN+˺0"DE8T ^=6Ooӏ6^ԑh.G{߈e*5UTq>Ri(̵91a2/8OlXxk 78R h1\'}y-o}vjz|hi ɚSȉN`bFyC!P)F(I 5H%N3 szԓBԔrwL1N>f־nM5_TZ(oF|_/QmaȌ2 M&e0G&!4!/:z,G2S  <b ّ;[M{"ʩhO}{ G!4DĜ2QEh(=>Aľ}H,9.X(c @|%u?V"On=:mBL$rJ*,ebTy(^z_D!f:I_Këgj}H-q~D!p^+xꇳ=mV L\+MɼpK~A sڎ%4GP9'~w=Q7C}DAGLKJ u99؃l[qU<:zwk"cJLQo:IGjgY``I/NN 杘4ٺQHeΐZ`=Q?g:@6p)2Gsh^;VAvW# $;Q 5 g$M0r!1k@ 1\ЅjE a&/MGCLY%;ˀ[xةQ V$H2/IC # VVPnY^9*lKYi8tSiv"Ź |4 [3{#֨8mn“s6Z4Hʡp/ϢTc%fmTt H/ X`S2qfL7q)1=d.&obkPݷF%7s`axgSmo]TˑR|odnf8;>2m?<(n8&xјʚhIW&OkÆ\pJ\%( $B{-HW{k(4۞wiĄ5݋!s N $+@8yK!^LeB M " O>Vb E73Z ; t"o)rqjnr~:yzEN6FY]BCfbF `0CYt)rhWev19q/tX b^ [o&<C9R`+`rio.È{Jk{2y\굚; B2I]1,@ZuN C([G*=ps #0.8q*jJ8wZXlTB:;C+3"ƯHG2#+eoIgJ_*Q BPCz͉z#~M #DKQ0ĝܱ\㱕iu7kVTמ{/mFaHhpUVT@f|UcJ3~/ִ3]H 2 t\gY !RCzt7oTAk{vonPZ·`vvOnNϡ[nHGGJ{`myԠLH\g)۳6q b{H{8@d:o?#;Kt<2b^s T]+Qmݣ#rMV:8BljO|v}<30!åcz4_:[AW~: \_kꖝ96C<,hV H  ,.kd#bu /Rܹ| nkݽ:EjRmmtѮ[r#i w` f(BpPn&3THfᑌFegr9aFC#&1MK%|>>0eA@j#Qz6wJH:%uu0㾏šXn{FY(P z??o$!R9r@z"0R@@055;P^ ΰl$pF+Jg$>fqT Qv\kOa g9lXھiܶ^_w1oy~ƉӾ׈b]I.4l?yt,*UŒkz iME>8MJh8؍9Q7r(0t3m5dφy I#}C,Ͳl}#5rm?63O_`mt=!'ͦvܡ̋ΓZj!{uƒ}H{Ľt!|OB]Sw\.81?E;>wawKc%ZF :zIAiu%i>*,,vvgc) yx&_*ªջCK6wꍁAy4L?#cxb^+Z1Ѳ0RMmSd(IgW&6\nȥ=M^u# 
I(eNǾt-P#)=X[[٘7S> ȖIFWQkTwr6-k/+&hp!<ˁH^9TmǬj ƳYyr]Zj cU|?gf#9> 7 r?7#5guUj#꾘0h-MgM=ί_\t'Y=hݢeн|XD|Le!/#fjE}E ::: W{j%ƥ`E&׿d+a(;LOg |h t3tWw5 W{楁THj;8j{uT SM0W$ aZObsڮA樞.Q#K*ܴ2QA Aq~& a.dﭞI{\uN/aH;_iȏ!?:(Emǚž39c4j"yCFtWFpNMs ݮ|NSӒղg' *Ϗ @ &4fa.[%.:OT Rœ8mFP+T@?}̍sSs$kǒ~W U˜K`BJӈR|O'_^0QZ :¾0<$V,ްʲ.KZV*HzO4ƠhŅn],W؜{^-*>H{2B]318,~.7Z!8Nd؞)Dž!2Җ C,Kp3gGMyMGBrNApuܴH!pQXO@L xRbdVKl0J@v 8pxu0a ԶKK"ő:bN*5u9.4hh2#P9t\cyVV/nƇ/nxE7dƧ< 'qood n~6[%"yyzB#{KQi鼆/N.Oӿ@771 (mXyIڢdn'F%~9BH/ |a5`G1Rɓ@UMqviʑ>c2 &A 9(=/I\Jq^gu2anYN=Nq<c@ Ѧe4ԧϮOgay,$Dgc2iu=h21zwh'ˆwS/j4v[. iAF啩7OӍ3z<6~T\1ף2(t%'f:l r)D֌¯A%6PER=Qs}#5x\K #,%ZB+L@\h gq%'5S%|H; S E_ ?yhԃiזpsYQoW;q1c"F$ĺaYBhH&Gʐ~A[&-|v(FY'XKYW:y> [H(RL{<ޔWˇpRD'DJ %cWju!d.kث~~vތN#>(Dj> ( Ls^3Q8x*0ITojhR.`4u]ƨwxPWOfuѰ̬FeMT;ݴ@v=&]~SG#XqkČixWQew;i e=CEmiIQ06c棨?sr,[ba.1hs3 Z2ٜ(4aL mCKc&N/%<#HKcM}&aeƘXfr+*KAp0w}+(ڎhďkI-4 {M!@OvBOEs~Af?)Xce Oo|:ϱD/zm}vc:Z(H>#Cr`?'-;XÁS?5$``g@kU"`gF! ̔Ma%C͒[Jk ǔ?א 4aΈ9ޞoLޝ!T^WfLAjzB5G-ٱ8ie_f1ݽ iiAgkXO?2ڼx:5C 9mQ26~C1ŪD[cZ_w#?f΋3% %g, /!d]jPc ZqLR /dtemkY*}t:I]0KOT?sߛiH٧>ՇߓNћV&ZV5-U`Ox_Pqw HbF|q+v9 F1)[!K"_!<=mcs Y+Ԏhթd _T8ܵfXUHdoxD3"VJ J=5TGSf6z 8S<%^(Xթ^VcT$i Y "HX"F̫΄/E vr5wђŽ D=wAԕe37HFj d>3}.6%9 FP b&Ŷ1 fRBf5?zE08`4{y $$Sb&|GfЏԼwlbY[U7_7^r߭0Ls$"Vq%u]iIEEϫ Ĭqh͗Ctld:?Lj;P!whLM|_1aU%o'Qc `dggA-Riн'+`bdW.<Xk@ŖJ6s$Ct26ٔYВ9->l0{=ƼY1܀YMe["K|C5X [1@|-yy>fD3/S9]ƖtfFxBp7!2niU&9#wD!.ZugiK XL4zrí?[ySyb8h2&_ɱ[2[H,˽ _n1Lɢ6yN]Me=0YJآ31OΤjmTF,@P}ԗ+CL7J3uZ9NُWh9':6ؗbyTM'Bʨ#Kn1sygxY0ރaxF|qeny Z/6-q1 ö )Twpж1%6fwN>ۡ$pd̟ B8"pz38pyv>4s_:C[[DpEO!pQY ]'fNq+~^u^z̎@3g %jAg?p lĕsY] C)4sfr]G2]R l6.CsljWJBT!OTH$q@\{~^ΉM.{yF%_[plinfqGKuIGѵ!^]ƲI[Ss`Pd7/ҶWٹ2(;%ԙba9']vt琼B&J4%ę0wM#T1#ƍH] Ȧg[=.3>C;F'yO%.8S=zXs,MEqp d0t#}6mUj+֙}VhM+~>E5>{TlW/L*"Egl7>^_c?b²5 fj8i9rw$茸ӥFĠWD~_KK?uI8"d[_`Ԥm%?Xd?`6G'YǷ=>DLڽM[7> c8 [( mK"7Ss3ݨWcLǰgV]᧿>:ͥ:ӫ?k^RŦ!39nM5X>T;ѪEml 7:v9=T6c..\cyz Vݗtr}8>,=z'Q*b<ގ63=83!1?%:2\gi{uÛJO.lm/X@x^gKr}b}Es7 nۻ)7v;"n|7lw]n7ݜGxD̴` Glcsw}{Dzra=8!k6U[ׯ[^|rmKnhA b, Mю̰2\>: ?sMۉNY]}.sj6hODϾhrŘL#x1x8~ 8NBp9osgfgud\ybcن ׆$YA~Ǫio}slz5 
PYrK-O.Ը @-wAmpa-7!X-94\r,t2uk*IFVc.paݚbLpEc],2(Bh! ]z-WCWD- 6S"&ؠ3}&/S5% :~Ewdq~wǠ DE{Ič3O!K&n߭&b~>[_?h+ܐU˔Ɇ0+@R HĤ# apMƮ%3SuoЪ~nL&fA?wosҽ1j&,^v 3C| H\z!=\no(JDsUymnrJNʛ gV\^?%qwhp՛e$\!K=j[r[c~ufj.p&'K&a{FwU9F5;D9\ϒI e _E豀d}w"Ίؕ*]NMO4Cw}aEZ,ߕp[EQ|*AjD@Qyeb%1ֽ^c qx(2ЁPN:@3ʺ %Lj}C{uzhy}=Y ![ JvI'09p Dac74r?oEŌy-e6qk53$}cdwCHh$6#Ȝ~jtK2G]Fk|Mɡ{9?8y bUInZ ѦlۦUQcxT?EnzҵCR?l[Fy=)oϩ_/# Z"%%b:3wb~;X`$V0L_1ęԐs@549^Bk77v#X3D;#8WuD'"AӘW|0GO Ե`c7tR8FO";!#C[) h`xX|` Bc>F |!i22\x={' acwWsF c&IAdlpHA!&qO+SnkYڨrhӕBLX h)r2|vh4vX`<.-Z$$FS _;yڱ""yΉx){├/G=Z˖*ֳXz/UyNJJ !#Qux r^َo=,ƱJ^ Kƿ>V[A g}pzȎKk?#ZWzqkIKrb&C<寚kTIB?vɲ&Wt3Nn7#`FQ,ޗpX9>"99nc QR[LtK}aK"9VA9o#E <)l_Gbq@rDԬYe^jJVdhsOp|}*vgIjzޖ ǯGC)=J+OI\b /ϭ<ʳ5YeB|dM-Y)2޴Vw[ulKŅL*(p\@@G:M`Zl$LdRwA,;6Q)Lf{̫]/g&ҟQt<"<S1E?GfOedɊ2tKJ8M@?ԓWn/ $Ww֩\64GT׵IS4)"p%>Tq[g 5zS3XR){ֆ-qtyMݞ7Y٪gB`hݯV]Ag YF+6R,&DkH5mp=mVߝ:q{b"O2鎼(Ezw30rϋ}n$AQ 7PU6:bnR׏FQ ndiWW15\|,ҵ+"h_ "#? ~}\ܘ NuDrr#Q(/id w5" #Pu*2fA NYV ͼFjm9b;3&1"rϞ"ܟ[SRsnkҘ#ė­!`*~H%Uȿhp\djbQFGnSO4,A "d{xX !J rY:H-$2D$BB1?G[8+]i>O gaVڀsE HNq]ּb`N@v"$LuHq’'aZJ#)=|;$drR}y2~VV2R%]vIaG+i-2h g͌PE]cqM4 HqƛZ -Tvid~ZUHsiwLkdAOuL 'U>VmGVt6/[B.v7ڱYl^mPyOe:-nVvy^=<0Mci35fwɏXX,F cz;O#qs oA=A`5i9|Y*2`Nd_`mZuob;|ӕWe0h8} bqZDP@ӃUE҈P`|$jbu̻` :~4>A*B*d6dʃ~,ui{" nw9Qy/b?'䵝kvdofr3}TImϪ[}Bs)2!CT6}Gݡ3>tvVG sQsvǗyC|Ũub!1#K-teZ٩I9nL3h KݝU]156+<9.ڒKVJNoG#T:+`sH <%-`X}J/༜'7 Vkd7M!-GM˨P ; 0\gr;(dTǀdsgkHnZ˵-}Ϋp U8 []N "3u暉t&PcH5hiԄ5KYT=Ļxntƞw}"$[[ة)(CbJ 2蚯R,4kk-}Kz?ɒf[#Vg^ 3, 9D*2@n<UY)< 7Lq3+2Ćɷ:5U#vLMR߶I)0`KmD)e)M̋Ǽ0C~A}֌lR v5@ 79*!vI"v)k3eZ[}f;6$-& Uq 5&Uu~93daJ XmAN9܏?oȷ̖26pI8wc^ _^e@$C O/A*3;m>ؿ~|}\Ȼ-*j 7CzѷWj5@DwuBf-d̷ײbzNcI_xI1\aם=#DVK>Z40ȌQ:& HN yr,u{t'd~?E{ّsJr'Iр7ſd[pQ8?S\EƢ/_~^ qgF徔ȟՑ:ޣmky Q E|0t#BI7 5cѦGzZ^[l](?~Pь5hb|O-N>uY.1$AwpWåQҫ= w1BVE*q7=Y2)BGj0x@GdqK6wx}砖&W+yl2߾ŧ''VO_ ;dYXlh)s15RbB 'r/KF .}9P'L)_wxLZڼ9m!Hԗu+agO} RS9~?^FΕnnѪbwqR6 * IԾL03gѺrN/# 9mS;bVWVG}M\W/Ddqa*p3P:{6Kyd=r^@lv'w8m'w^MH1J2|;ϳ*u y4g{bҎV'7E bo4nA[BTIk& (dquN~wJu< бw!2pw\qً} EN,NyN9=\=s|u\(>~!)ح wp]|AfqDR + pwk ' !d#2 
4@5[;8pp+x^S~oa^2njшIEBzI1}|#k=Ѣ 6`c &(X@aCOE4;r'nio[fh7*>G*1 њ7թΞg[Y<2L%ĩO.q8EzE,izTs#1sԙ16 f'n}:bg=Ј> UGK@|ߖ/t´lNvKH[Y!DV ˃8q5j1@Co_ʧn 1|庥jyqoֿs6YIŸR2_X@aV VHĐ[$c "ٻoB I1H 򿟐`ߡ]u4y},;LJ;ON[Wp{̿*cy^y;wc\5fNTD"n¤4i,%$dB@Iű2pw<\-Yk|[zK8o' ^FcPQhƓEF5Ǝɨ&F$2!;>짮} vZ/"fsyMͼ]a@ bNQn먢mݸQ%ɦbTi$0g?޼1V[wÁM-Z5REAF $"ƾW{O~ j0oO&M82 ȤH2H@ . &+Fbοp.}t |6o/<͞c଎# "RZhȩ,e HU5 TS$*5UV];mњG+7!0#2L" "°<W:Ѱ*ZL鮆8g_~rܩ5 D A#TJ$ !I|}O`=H АK4z=?|Wᾋ{|41J(2TRP%i|[R35keADYmgE 6 $!D $8 Uв>iOCvo7?kH$PQ!Wm6#bd&P7lD#z9$(` JHDF|/W}{ϮF=2B R޽z}}=ED "JOu}@~廻nrΒ!cH!a(DRqϫ~ѡRQ@1J"do/\j2#7"$_c^.BAg8Slou}7 3$6 2QB(Q$J!LDD `WI2X&n0i/x,b K LI$d&E,H $1I#4%$]$2 ,={&3!|3`4DA(%e^݉3"h4!F1QebP3M;$#/ިg_ dl$$g5 |]3k2_ ;F`ADlLhS~|JiF(&" 0(h&I&0=S"#0W)M*ce}3R@b!A$%#&J+^ !)#rl2'l!L w0L%$DȠ$cHQ$IQ5}5D}ocd$&,zmI%Ai0TdFĄIdQ%XK3 Z"1D 9đK10E3)JM ”cvfL$L ι0 #4h](Y DeFwsъ ؂FR,&(i=uƍnwsc$T$eF2 \Lh e+P(Le#4dDF*&Ih#DP dSӺ`!6Sb3Li+ orݔh(iQDR#H"MFB52h(@lVcCLRhК,d(AiY%2ouoEDLЌT$)#Q%Ah*MeX#cQTBһcQM](H&2DFI `0lQ` h$DQLcWqm|[o` ]X-Gi(ߡgs߿\{S#*gii02`Ȓ, B I#[(詺ޅmAu_߮c.̪v+iyγ ]mp'A,#;|V5md֢XڍIQ&V1ҌjI3l"LPcM"RX"LEmkZ]_Q? $Hn$%M@vl(?у#JmD@Z: #REpE"d;[hH$I4c5\ ~c7벴 3J1 I#\1VTuGWv;ؐK,1Jek`KŔ` JZ^x+^C^y' 9S lz?\o`'"'>oһe/R1P_uZ [ =SUvW~~_}@E3A*`Y!Gb*$vȮ-P-Su]/7w3h'ҀZ1EEИQb۵8`r A}󓲳P I'uV "AQ ('35.W_[Z>Ž03QЊ pp~imꁠh XJ̭`ѴTPPjKfi#T6Ʊc%[XjJfM$jJ4W|cV_kʒI-hHtSaQ~M^sG >V«}^=FW7TU4`@MR02( R@(u޾5JenboX K-wY7Dvz] Gy_﯍u?_o{x_Yy%mG=ls39CD+ 24>w (*B b b* Jm[ZljZ6Z(eW1Q~M"H–}!:Z(FlYpX,f ޚĨ#Zt{dUHFi1Wo xȗUb:*k'G/"<cLA q9gtCuzA+6.4'>fWaF PT-Ww J1ɃkGk^DO)B(HCuv\*uK4= ;K/<,yyqJ'ӵ%bmiT!äT|껰?hVl"@-`6T(OXӄމ>{/\=>t 36>{PNX=쫣Ih\fc=QTYBP K IzCΐU 2^jf'y KtA@Y6Ŭ,mՍ$XAI ~>WGYWIyBD"v \ fM;EqgZ./ytw^=:gҜɵk_k# O\AձB,%#-2b6&Ţ6f+cF*iM` "nT(ƍ&ω ph0}X^ze rP'6H+GkVt08N/e}k{ƶD֖V/_l1a(7Sw~wM|#1N.*OGqJ} A!5FKDA B[9b(WqZZ>BtA gh"! 9JWI|ie hӦ?5B$Oq}s Fm]նuU*n@DQ^_ɶ2鼺j_.%?>C "~w!M8R6ojZ3;XOǧh99צpxXIY!uLcqKUɕo]OJ}6Z}{W' ȱE<4ic44{:zVoUݐ m9?: 9yUG5E<<Dža`4qTٺWboAG [YS5`nvGkطvCms{J48PmzwE\֨ot@˚Op8* NL2b#$h"ȧkص41\AKt&kV5sJ~0:o . 
;euZ2_t61Cy ?Я7,}I0AʈP(4JUR7SmI;Ydfύ)@{e1O]8= a= jl@..׾FABk-0e1"*LDLT2P(A@ S#XS"Ɋ̤F2S4m3cL,` 6!#RQmR%DZ"ƙQ& MQlYeX~>w2C^.*_TZAXEnBgTBiѫH>ܰC;^mO5DI$I$RGLDPG|Ϫ=Oc];.ptR7kn¯{]9Ĭ~usV#Ts0ڷ^ӯ|o>t[x-/7~#bgdPIDD ;=iY7;:o&i]ayMw)pZS:bК@1S,bFƱcj bIw1%5m_Wx TPTZ󏖘Zhr:\#K&UFp#*h&_֜vnړ sgSw+RY`Ip0 :ʀy>ژԈSE2A&$CNEϿ| %x WdJ1%֢`hO:o#ozoL 1?yƻAֈ-\< 8' &x#ˣjWp22Zi隤yh\!@p[3~u"(rx檅i& Ƌ qHqk"M@4 gz rKV A'UBX Rvbg!@fr VaIUuH:}V9$-ra$򆹋ure=Yl4 MѫIi^OjNJx樃 u>xgGXFI`S\/K z Oz^݊}I\GV٪EK`Bp"Y2$U޲*Bmo]RR]q58B/c|m  bd9Zpɍ ݚf#06;৅k5_mZ͊d~7բvihrzF i?8 ,`X0*zECu}<+.|qvz\V?/"PNB5"$Ȯ~GOoKs)9Lv܀!4+(l($ݕ GVaR1t)t%By`}Cv 5wReS#, ƣ":e{x|Ϝs9v*ZJh[p25zk{at+yapu/ 'MR=c$)9,G  g92 v @Mp'͈e7>$|e9lؗM jHߌ&@ 0HҌY` Ӱ~!f7y_#H!c;c.:?# ~oHo2ݠ'䭰+` g?>.?v/ht?L_%A$E'unu Z}^a,*Hp?8ﺜ+E?p\e X"]`+D3R+ы#G/eL[.9[V/=3#8wO06@eG 昢eDT20(;UtPi7U;?ஶ_}0#@E8" 0Q'Q1+' q`اH"HE pVbs`(0Q,I˪AJƈkm,%n8''$^TY*T_R혐4E%X3E$ gSwR/|UR<##t$)oĮ$IurGEB8؃,v .d,%H2UYhgo.NzqhLVs;w*:7(Za MP\A~?yȡ4躮'xj =^c=#=*Ïzv/qƳ{ n5($& ԀS M.l=E|'10h|zP2۷I [R/ ېD 2`YzlQHE]USw-w"o>;y|LS'+}q[* [f TUT)@8{eodL`<_֖h՛UKjE]4Ht:MO@lD& ɢt7e7!maнw e4Lݲffger ~ϫO^E7a_uAjҮ am)6!$Ti3FUABĆ0_^t4dӾY9YO6}>[8DMd@jd 0N't:MXI Fy[zӺ׽6o^w\?~Wz xDH'ɵpЫ֖V}=0oegsWH!+50HXzƿY?uuO[=dz|N'8?m_OuPT7Dy.N/w ʹQeݴތ32s/#65**/ ™q@?9c.>?!"  
H @PȄ _8uU4c/&@VNެbos A Y^km}؞;vq7yU4N{1d9, Zy31vz]d>gzb]^];s(\wX}&^pQk?|yx rZ4Nf 4`.6SPj\Ɓjr\N#v ` P gק@!쓠 "p>q^gO8'0`kqzߌ0L)#i"=Y.wsPIcCruVkʻ/5dѝk<m-Wz˫|Wl՟O VWG6$ 7Ng935ֳ·jyC[zkER=aU ":R֍it2hB'G ;oU{2λe5TC5\Rx-ҡ4*viG2by{j -p&Mc3έ #Wy\BXyV ј;lXmVRzuBf8XtBVիwCs:r;z;ٮfWz@vzޚa'őd~yDA[u]1JZU"W~ ^Bg)QLp[Ucx;pBJD-90N2`j7A[J,n>\7$Au]\x7!BbSh6Q)Q( AܠI vؖū~X152k0բqddd@WX\Q&4-ڊk}[W|2xR:;Nk ,I{V׾ְcetĎfY)}_v*mwdW1\ܷ v?ų[UqGi>GRܛf|N.>446ϳN:TrWпǑlȡĝNV]uL?κl[tܣ/k$PG ,V/ٯeΨ;ij`cg~6tKOZe:u* _Z_RV|zyfuS1b/>](^Ru&7rIsqn|6[tE.;x P Igkhrv7=t>]}^[uWz5nMyf3.w is] TfE@傟SiD)K^s40XM-k9 i*Aveʹ2jC*C?VK+Ԧe U)ۦڠ{yW}ߩMunt?݅ǂms̹+w#*)D5 f4aI'^(2Dz˛~׈87Ȓ5^«f[)˿B * >Mg* $n\:\cDq&YindQhAkש6p7B^n@-V\BdLkxᤙF=AFZ& v7Ph^?"=m Ö[32wBv%zk q٩VOnO'RkI]*Mm{!LX[6VT&h/}U\栍Lp;Ĉe6фH5n;./ 2DA|HA!pmfU>x~#ELmG]y0o,cr0*h৥$ZBL4Qع- 3 ,E]V7O>y^uuuPvf2M[8*Rr-֞r3uAdNM( d`th _iB';4s8UKIV;c9ϬVY'.z Zc'y}{43(2B -+dW) *Lux)ֹ`yq+Zv灏X5 w.ZZxGe?m?Ua|l4$H2"(?s6)pEgʋ "ʩ|]EwO^N [KqZh(hg?C,QdCws6" EB,I At*~y-SǷ7c ZTE"P(AEB@JHB6 ͕T?! ^σoCWYY8nK/3MuÄФ4܂C0EA#Ch[wǠ괿_&# [.7~|$](t/h*<aD[d2tP8+%nD ic+42"-oѩuU=G9o[v!w_̮WQjP*F^d6Q4v3,2B!f5n-3xAӕ'G7)+ҰFvEv*$any֪]-a"CJdIBG9%u+#2)էbp2lǣL"ԠqmhޕۺX gTrb>iՉ-%tZ=d6ҏE8CVj VLٴQcP4P<b$eN(^h0KXH=P\cP`F߻'S$iI;c$]?i< ✽iIÿm`{^7ktqFGT^RFTW~%x_r`\!>""I%6s,OKӀʉ@2bI [B :Q[ vc)!R\EHaBa Ik@$L҉fZ.ZbZ,GcQcY.9mcM}Rx^=mK|{Բ@(CTl3a̗X VF_OTۼg{/;!nUeƱxy,4&tD )"քEf@ ],pSطї ;TJ!YHeuBEs:B+Re~]YN 3$VP߄epIi[Q3$N%b̍X$Y9R[~-IFb߉>~:^/y—E6јU` 1 (,(H[U^k[Uk[EmETkm-Xb(ƪJبڍ1R7~J7ޝ'}W^H4̊`}sׂl%SS[t.3" r  ;~럹P.h#$ $4LDc!,j,ih22!%;dͼV@{6`7v 1T&e !bi^Mᠾ&EӞ}qقiF@GU/k}^0wvw?U%},ʼuZ 3Dn_d]P4eg $ IANjw+ [櫃8]yɜ&mǧp|iEE5[zJujϭ#ǻ|?2>ؼO |p?*]6ID pǏO-L8y73`bz-Ņ#p6CHX) 3m6{ x@/K,~YDNږJ+r|WK}]v JZD() _\CnPa)#:UY UUߤՆ*_Mެ8G/$*hk1#+_ =eJ[9jЅkT, Gt񑾷OJ9\Օ߶.⩌w| >ygnvذG%rPm ׳KNB35ltlg~`{گ:ہ*dV[dpGTAb DN#b$I֚굣n{Mdb}lģե'Wt 0'gG 8=Z5 ,sV,p\?i 5sTSw[]6b*R FF i T4O`B$3şNźoK{0X2.'nf%a% SMVQṆT$ H: ~U͐Y~O>j{,TFQXbIHDH l&OrO(+| EHBIk %"$>c_c{K"W $A"禡 nU0zC$Q. 
V2A))@f0H-aRX;̈́{n0[=ggm':tzJ鷣wӝ &Q$hA.&xtlr 1݄z4-!Vy\czB~C!~ $C考T lfhc7U!>{j ukA8'(-oVKaY>]#,>cQ`p@vi,W*BHrV-UTAv.k] ɪu> Ddpwk;]lF|9؀pT@K)FU /o3ltϝYɰfYCIE уEqzye 8~|kt6Ef\] (U8R nA|:{0Hpb8l$攌浂5&vϐQoNJrL#s1zC$<+gFvYVQyNFVkk 4" RP2rOS:D {ˈ\) f5u*(%AxQBz>gmN\PN Db@HR z|gѺuE*X# PgadN!eU8d9ٙ;.x{kl a{FP$\eeV^2Ql`ݐ{8 t0(u'ĪGƶ-~\,0`9gi8g>UϓB#,ʰabW ~7ݳ?C^ Eu1+"!Jgrv;/M=Oqoku%}@"Qw!JrA ! kFU_u\p:uܹsr<{SRU)@v*sT5*Qr>b6ͬ_WIq฻#e6+ j׻ZVkSV - PB.U B~=NWeit_EF8\BfkH 0=r.hWҰ &6z {yj#^N爷Yn"+څ`r)}yf6 ,OSe(촨WrƖrI`-$Kl.WhT5r:%m5i1v}p , uVn~sHIc3 PT(K ]hS5[2Tq*Px4 EaTd&SL+^/DUcwcg.o'|X$xH}mW~oկsL+qq]d{"tZ)2*Rz-K.;aY̫/d\2+?;?"}4z=`#D~߮I<ԦFy,֢Zg X+a.9^ ⶹ_í]vOE4@ HT⊦= o Ԋ#D>lqQos0v1G%9  +zL=CHܔ8:G7ynKX(k (HBY}l^%?(QjMj8>/'A-TՊVTXbfgn.5t4C2# #, P/nw!( zϹ\pҤ80ZxB͛aeFb`_q$rAc֩" iuQܲ> nsz|k 8جrS.=~w|18)Q^,)2 @fG* dE˶EJ]秚c$x>P⨜z*8I>3u&Q9(oXHZ Xt3XQeVi#D'22$fI`;3eP[*\#%RJ2t(lD"TW3U}Ǚw3eFô?"oh"`@oT9RAQ8$V0za~W=%\Z5tI 2_ra5D: ;&x:U+p\?kn3᫡ɺ\w.xAQ6RD?KuaOw˗XP?\d!͎.}2鉄J9S  HDT.vX#\\@NO!}mqU74QUvZ0"*>oZ }6/]zǿmU~̢BF:7/_}]v'mqJ|ݙѝcZ8%_XVnu]? N[cqb8KE )H_$_yK ˉjj3M?jm5: 8j WmZC?'hP"QW#ѯ!۩X%R4sgM~QD"ls*lQx0F(/ BE,!) vKUY#[ }G!@X^W1SoUR-" wvvmû{"xI;v _{{9>_ROݯaMp5F8jLW劥! o[d$J$6=U(V^J  (z !A* I l4ilѪ=(תZ}e18> #S؂m=&CłG1DU6bӣpD֟݀́r@xjf 10 @oD BV*(TtɔCIE'ǯ@gőny[ʡY֪ U.S+IK1v2|D Eפ.Yd1?8oc-6{{k&|oU]g(?s l'j{I%P8 ^&8͛ZTVYR-Qb!([! q sf9Jkr_ 71M~?l@\p\ Z"T[ #V!Gޞջܼ.VlQEAN9A0P-Q!o_:kUm+,2AD `/D@`|sگ֛m bZN|:]oMZǵJҡb\FR,?px2 0P$RdRظ?#W ]+w]ekқI^i{D\ŀˤT3ymzT Tb" $P_']ZӾKB?]V@² +cb cedBe)mϢL$P:O*֪ ;:HӶiWz @j́<F4*;7>_5j- 0Z'q{=v/i IH @@houyhppZ]*_8;b0Dd}r*Qxhyًxz,Ug5+!BhhKYB̳Kt.l@ ڢIG2h_<-.膈?V;K`1Zr k 0%eQXCI-+*bumrBΕ qվ*wdmq!sðd)iUcšy˳S$AUp̤A|ZȰ,IUe[0|2;i^&A]j^)zeh?>韒Рϛ9F,KЖY芢:܄`-d՜T[4tfd2Q 84H=8&KU(z@swԩU[+yii_}v/v#gGq?#NOֆ2&n~;_o#?b5` `&Jյjmwp'h @9PUev᠁GIC-V.U>BCʻ2t"J*T@R:PF'n 'xjH#J?,R/;><3+p$P4D-E ݖ-VSLҁmTI6؈ R(b!&kjsb%Qb"fOW}ye0!Uv2jBIx` N>_ p*c)`)ik#&CR[ujV/k'8BphCYBli] b:Q:m|HbF JYFwiܭ7дt%d8RD Auhe_ΔgnEDC7}ua60`4*kvhuJ1,̢]+kn"o-ݵ. 
*-b1 Id*(o+'VS5xokm;Nҵ@!@ Z 뚜__j9Cf B$B@ZHiͪyEC*z6P8 p=*A`(@KiB(U=: 3fA!T]׎D$dBBE1P2涖YRiM2JƈJUFU" *F]*g* :40IjlD (y"?N(}?˧4&q3dYսv`6e - 0j"#A"(H\Ykernlab/data/promotergene.rda0000644000175100001440000000433112774400040016014 0ustar hornikusersBZh91AY&SY_<6H  @/݀a AUR?!T44 }7 ֡2 "u ZG zˍ ؄ g!Udd.xXթ!Z'\[4gM͌93e@;,6ҭMo{OUuf/,jY $-[M R->o˕äjF@26rpENpVrƜFM]P&]C؞aԥa tcXk vMLua n;ǂh\`\2eqD)sL 8X-s. Nή+1u FMƙMPIa+:2 c;)\z,{:\*4KL8C4dV`9x,xcmr^gif'1`v!:y w sv|3iIN`9^#mk"-`,g+ZmQLL1vh ŘDϫrsTYpV.sq{ϼsg8|AA;SK&m+PF G<5b,fTfo3Rk N^QFMC"bA䝊w^w>.tx'85eI+n#Gkl+Nl+V.#˒\d*D4<4Hb`Æ2T’-`j*,IgS>yf $V3aAqj` (ɼ>2Ec9u1LQ{y%r+oN|0iXX )) ԣDz팽Hiț \Rؚc2F ` ѻ6rV%3$5˩M t&3-i*btg1@tsΧxy:2YzZw^r mmS-X"EnD$yY*=ɮV߃NBl0kgb똂 $Qc-^~Nb&~39sUUUUUUUUUUUUUUUUUUU{1cU*uUUUUUUUUUUy""":~WD8oV`d]8qE2@ C-XM^h8\,:cDz-oq7(!NL\f[_4ZҒܶ+IXGBr$_tJp4W/dgxK+U*3ru~fY$d1x=V"ߪ;0z]2%iK0Q#)[5J M/ 1}.73bu7@Z˫Z]w@zK 7wr0كXuGAbfcP N1Sza?o%'Mu9hrꄇc~ ~ 1R̝;,r ! Aه䣬 =\ Qb5b 8 qV贄؋/[k&İD[xUU_P&m*I[^G!`>X2ŗ4@:φ.hĹkUq4Q <̫:qQB{n,Q"v$I9!:uЭ BASMu~ @ѧ$N^;z[UPvwjZ8IӦB*|\8URV$m VxyiCeÚl: IXuq+u]Y0BUOOCEAHp-WEwUMKwⱛQ/nDo ;r1njV\O m7w]X:.(ݎ*S2o|Dt^ez}C:!z2J*k}1=P~Q*a;'Ѳn \|;oZnv:Z1K3*?h'yƬ;_k,:\ʏOiF#14"LէŌ)N`>ݘh\Wí9v<3h ,6‚FY-C&b<0*{;w9wqڑK^*9\xNY,d ,N @A FXRޯ;ZiYSyǘ~];mty)\^6C]Dx|EmOV  JSj.T+g6P6M<8 MD+7*Ôaq,L ] hQg'$``'R'?a߻L hܲU6D+LԤn|_\`[>G&-o 5k4% .n nNlv fyDNQRL+y*hYCL}[P=ӈ s"_[3F%A 94F8Sҝձ'_?Mc)(jȗ0_w@+m({u IS_(msk(J.Y%u3t(|Y+ze̍ulV->IcquuV ?ew)o'AD5pgѪ' F뢢Xš@ι>fE蛂{m͝s&t3sZdmhgvFgCQȜ P)+Dy5%N@^5{zw.hy=|+xLD-<r@&8>TR\7MY,݌IqgŎz-Ca\ݳF<¯ uJ@4goAkנzfԉMx['&Mj-#ddn+Z;tFa c(쾙E - !sw`ēWB nc^O~)~n"ɊӉ߰x&Sh 5K1P/>@!aou_q;@uF08X譆°fo;%Z9/҈NFTߴM0`@x^YEFFU6<76pnn9HA ^E~;졔l]|[lˈURc`#qMy6e "?d+Ηl 䨈 $MYڒ́׿WV(hDvVnLٖ~քǮ&z˰!]Fn"͓ zQ?,>=4 C3\},@S_OE9 *K6-0)׿0@v_Ba+е̨#"3Yڰ!Q}ov)4g[ .~+23U/W]ϴ:LX[D@3OG }Wc5% ; P#Oy42~%!0Fj75]oi C !ۏ7? 
;v2شeӺ6㫩W 1swgr-#}}j' TCF nqwG;YR ul輣u-/)blmrq |č&nS}XEO:Ѹl+kV37 ~hG bB8B t eҕÃ> +YbcS)Y s 9|"WWxЬ8)ZWI(KD:myg)߮ +TIxa(w F@r8YӰ;-ѾǴAv) mm󲎮V%HTB Ǭ0܅#g76yNsMJqn(m4X'ڳdšX5g(JiW",bYgk͞\|V5.Ou&O/e)2 ő QVR2P9Zfy0 g2e ab)5 FFA1{ O)l3Xn^t# VB&~q@6wH~޾,<[LG6>BkށrD,NQT; 7?>[N3ʂ9UV0b׶6ʲB/uws BIؚsG2ҧW ฯ )GΓjyH\sПqA@)xԜcRЄtq}'Ѐ*V_9ר(M͜A]c㠝Gޒ6/h9 ({ث_lMі|F[kGf$)懸':KRKF&ԚIm6Qp~FVkbQ36-n@Ѕfd?eT!؄1OI%CĽy`sx괒 ^:bwSZ۠+b7"ҊfH`#_ >RkmYh/ C j|7P hƱ?iڥ,bц#'l;Ι #$$Na[ږ7`}8frvq|G@i[W 9|&l[Vs՝Ea8LSn _ߐwrP;fD! 4Kq@F; ngDiM}mp ׮YT1z/L0 CV_Mv|ޝ:ƏFc%gf2+ț֫. n/)W4FᜱQ!"e AY]G&;Ba8IݤHHj^8uo][$g\nLmOz xaX G;|PǟΑhyܣp)n7ةtw'Ҍhhʀe "F@$)5W8)Z)ﰹ)i]x%ԋP-n jg$Av4r"spVY(2Mw'1~ηŐhVẜD/.bW`+ץ=D\#I}=& ^(T+rѷ ~XٻaT#_%ܪ"oNU Z^ 1p7aA $دpI}Qjmhp$rUb%Hn9JQ:evE4۶1E.NL2!LȬ_2՜HD̥}uZTg a݀ o^0J$qFIؠ}!O*[A^ptØ>T"ȽNJ4iHeN"˝ڤ` \IN킸x_;JP$B`n`Jk$z'{QE~ LM1x> u↓XM7q/ 2}$ 8>Tꓤ|Oi0PX{X2lsVqn Jc~>Ώ5P.%흯.VF+;lf<۳ "(R ȘYiOL,>pꝀ@T %潿p6dd(Qy8G]8B]cr}rK $*tYEtSF+'h3Sͳq'(HFsF#έ%]'p9桅seV~htjbkS a)v 5zY?lhVK~.C/U=_wd<GOIqͅD7%{ dԇ):dA!;}k B"^ϢH糳Ң0)h.Lv%\ƶ s! 
Hv|z'ElOn@ê5 .5Bs#5^!,Bm&wmT⭩?:TH w\ӆ{ghfJ,%Ы784PzU:C>ܮιqk|Uؽԟ1HGkWe (u8r:%׬ 9]1Y!iőn*9;U^*)uLfZN-C2 `F!NVt}1xnY :)ChWޚ ߖO[z\3?5U#L;Mc[5L +z0.ێfҍ\ۈ[ 9.S/KF=l"{46|2O-ÿ;Ѹ܅݃d:yٻn溚z7FNi_1EL8R[;^W-Kyf (%[ieVfvi=p9̺ygx;AߖLOK }1B?xvI"-Kg45~v/ZU0a- &N!&ᗮ Ep  ciQEgh|6UjL`1::uGAtn;(O|(YpI5Rf@H9\Fm|`_@aR[pGAa ráA?O^DP*FH]ٱFz{oZ+'Y#79`j)-qcFD2Ɯà ^o."e(f|e_H W ks UO?b+>h#b˸W++..x"zLkO.r ܿC "oτs `N{{oG |ǶHoD6N:RV |q" $bkM_&D8,iߐHPƧMkB#9wd5՟g $] $W#3~bn_5羼i|J'Eqsj(l} 4 icZP,˃6@n e3 ]~Ȏ.#?cnˤ\l2v')8)6Ê i:$,;3 P>m| -5ʰ]D'M5cveb0}jR8Ѱ'H,RZJׁ0 a˙57A1/K :r5)ZF^vGxi%L{ My ztٺK0)fg]x ec"^Xh|5HgpZ1&Ў2jĘP4~@ 7/ Lw ømL;vPqL9~P mR+BlO@]0+ƭ^38Xlc^EFFqbw] 2Xr;lFeg*uϣDl7bG`cK FH Ч?jϤ[ϝ؂ldRvO|u2l?:Mb!^O*g+V ^9M7EgEFI _[ y Y6l{gqwmz٪QPO@,иOY Bm D*$=ش;e+~F* >7BUf4BY9haЂ Z)ԧվOC+t!{3?kTmm3WA™(f lA>n\ۿϊQ%@Cq5Vlt0.Mud:$ 5%ޓU퓥MB9_B9Z{˛&׉tW߲d|gh3Q(?[G zM\'Tgc @"ȡn8PeޙUijX[BaMr4|_`,V7ŜDz^6ϷKܿ: /j/|4C%ϪBnaii+|g >qk`hٛ/ic?`KoT2r`W$:eW\b Xf*)l,WN1޴vˮnZcwA8y|g17 V }QxBʇ^stcPKBeS愣mP"Ze ipUó E.r ]) nn $]^H>({eȁ[*ѓQK{gIm4[>Do[|E h\&AJC U%TeALi9CSC"Jqy+Wo:Lr|# Mՠ q11{L0g০Y2Ba*fj ׁPO kP -tifha0ǖGH5Fi=; Imbr/a~x)ߠn lCH+F `m_h;`=G6 +Z6vy"\MZ0-QddѤt3CiO'py`3=cKWWf7Kԉ}ĘiNgShVu4WiT9ւQx?s$'0 bk)T1 @V5cuBDypP f Mc"q&c88}JQZe]?5*O6yQzOA墋w-"4:SAߋM71-iV2u:M*Sʅl @z}RGjtS?LUB0zĜk8`e/ٴޙiЍUi9‡J+LWȾuEtⓍ`RER`ZX/#inL$y[X^+lq%g'rW6d(W'8/apU2Y+d7PH)=XY |Z4M7*|vvQh7_`ꚯWY.C+JR׾bT[!ZYe"hA!Ai(Y:{G؅JPՃn3gVB4pPl_XMre%~ ;Ț; ηr.D6\G%y:b-3yl-6q_͜ 9:I?MIS'sAB1UjUI!)ч]A::LٿhYzuqI㩫qI̿i_?YxgvpPmc 0%zv=߷VK;`2iGG݇#3@.룱4[ʀ>>qQ4` 3S=NᦀԈ(T?ku@/~l@E{4ĕ%9]{ !N+7̛c1`%IE}4MͧqvB&K؀%BZ_l=q.z$mvGCÏh-DbR-F ڋ첡I7bHT\\s;2l;t4gҫ <+e絭0&ȅW8ׅK*S?R,g{c]}\ կtr!(zU~aulA=b%OoL?L'Y-XS99PrsͤMٵoGJdUt& zs5g0B((+ٜnW:I@,rʁakR:f<5Urۗ-/`ADm`tF٥C9=Y(^ EkYѝ+ l*yfzmĈ.H'bWplb/mdH4YLr) !/.j")8C#(lo9yRG1fj*!D'h\L B;" F,wC6_bRގAYD2Yn(8r'+9Y*@גUumb2wqw 2Ar2WefmfӮ;VO/bVR) w?X `vNL ^Ou;(s5!LL6 : ss~FȜF)f6Գ{|6qFHLn︀6H`)Fޯi58vd.˩SeLse|DtLg Y}oMq].2n!ncC>7*InuU}3*M0Β?OcEːhp.D:b].W%hll8ulG"AU5Ʊ1m諁KYj7-ƽS= 1 3JwA9Jz.ݹ)85UlMMŎ}&g)`.KR/:WsjÖTdSI| DPԮ^z^@QO3?Qۆ`% 3EofŏQ'5 _cҸ*A_5ctVj13Tyqs 
}lS7|?qDDXȯxD#F}wcQ<]mBQerzmi搙y:d/0~u`EX 4,G4l"ܺ"^@W_p7ᆷ*cR-WgmiM10u#=">M"T]ݚSOu* :ksaq@C^\s ֯l dPJ.Mw #@ML"wөӽ6vgsN*B%U@X76e0Fm5"ƺd/~lw UӦ5HGy/o{o58B,8e Owecub$W٪5KGR{ۮ[lnp|OɂX rj]'? iGc1RSU+T}֯F/^ooI~_`U^AE nv !ŘP3;Z6DzڛkB^8+o`^ۣ*v&`퉅D\՛@\_[D+ZNp_tc;XT,?5WmXM+&ʶkH%QK1!wV-xV_ӟR^vd ֥Ns$yyФia f9jJ8h]vEG`gziK=8Ob*xL-,U7j,@ј40_ yDFA9+9W0Xvnw&)&va69& k;{v!N(zVpk  6|xfiyJ"IԺnmgYb$|ⶒ\VMuFOL}QQ@%6$ҥ$Ѣ%!}Gc`j^C,$kxW`(<_d iMuE%kbRkNdRѤQRc L)` Z4J.51*Lּtd)qNn@Ę\G3]v1 )".L.6R&um=%cvG1I#M$>>hhn?q-l65a_D7, q\ݚGJYoh"Q9?qE' ,=Et!n Y{ BbpK):6ngRs>zJ{Xh2)tgO뢋7mzm}h9S?=b꾀O iqѪA+I(6{*Y"kr.>'y=߼7;772EtfV]WGʞ0]Ѻ'^I|FY+u(w-]քN35ߥ7[Zȴg^jX30p'D2I.ƛ\ cVe/6 {'G1NV1Q-10Z;?m9=%gʄO \?ps'f-Jsr@Ⱥx`& R[K^hݩ)f ~SWЪBGf;9L$Ƨkqu7\OvI/=aHrΝn[h^`W\0Oy7/#H5!ZF(Gn@&|Onm~ -(! wkK-)I,hh{x3Bj+- FY87[z &zJ"6tQM>XlOPyD 03qN<vCƅ05_AK(r+?]䷚`8wȻ(!gi3{U^ŝGMhFk<2"P# % ؛@g ~UYqtS??H$r涺YFV(_[tE@:'- qCܸ3A xpI˕^ Zj5E;i8C 2Ci@E1QuP9!o'k+&0z k/[.\j2zj-ɝ Wm4E.=b=gru=2D)'3)2vliD%`s/2G,׏LSQi>qtu:c^j>e̒(k _pHxZ[2G*MT}^Zف𷮽'L<JZ폳Z G&c2C SzbO8A2%уX2/ϵER @Cb7d0&>N \VSlS!Jvn7ɕfV MAޔ:.W$GKX؎;dxmn%5`-l*FBg0灹b{OkH)%:-žP}(wsj}CYNko"W<$oO.!e23-ƫf*XN~(tsP jO?gƶ;Qm``#o4GP(I!trOy 65j D{hmk&4vXmP@ͫxDF\RfmKCb܏Zm8袊gsF9 "^z`j䅥FKmN. ]E*qEꠛO{_}Ck@#zrAn7!rMK~KWxjݹGÄg'PexzX-jGFGҙ~tm@ ^*мS`HhPlG}ME=i'c2]BG5p`Ae^;uUvզ؄XU B1TǺ|C2%KAx2 ݶ"% `I>G_i7Jį p! 3C fheϫ%l? Ŕ0!zջtaJx]]SB7E4ۀ= zw}vU|ɧ/rQ t8O&"!rDj R)S m*1DV)CC*K;}׌aFjV95᣷N+) JY&LENlu? 
bs>f$h mDCu=rxN`BYV__LBɉA#q~5:}sw*(lxZN>tQSH#ՊsnL/ pL!TB&${?߬w%;ZFx`;DxrhLk Q%OuK$i+o.l/Ћfɹ7p9CW^e8Y~̋%OMۘ9tӃHB9šU)l?;ЊjO<^.6xu,6EXe{.A9(+JHܸqazkU7m:aBat` q3WU47wSrGeHێ3**lW@2&.K tU<[&bf:ϑPD/Xiq\]G 54 j!A`thX^hg4:I4׮l80·;g)Q,$/B&GkسEеM-U ue[j{>GQl%j¼}bIW@*rH'g;b2w,`h֌ Bt+::Je2@M`&fð zԎk8%@N A`]v)$ +QpK@a<!ٲ8b,$ %_pDT"Yw(>嗽©F# $7BPzơ# --k0pTzQB2mqW8ŷ@ȷF$#^lB) V X:|4!')&k9?xęC08}EF^'b^zČ+k; 7j]^bҏ$n.7HO{kI1'S8C[ݑ'cE5 zeYPw}ol?jPYÈ+-Ou%N_SA'3idyI3/j"ʬJIRJ*zeÖ\vHNI4 #/q;}GuK/ :A DC3s[(L0egH]œ<?Tfl$~+,v݇22iV~/I.d&9T6dzK\[C7p [m|AIZKx9x+n'xuL S .t`ӽpśw]*Px4՟֤p*uu6E#ʃ)߃/{qX, L{BSF||jp|kXHdo r,TU1@%Q(3J7œvJ4M-W,#EUoY:dɑݸqr̡ Ls'ie}Ie]Hh>S݅mm:To2/GJ{L+LkntZU%VEI,,*Z?< VMȶ%㲳7Et-S}=y??/œ48N Oj$s/N*ݒxf e5_4)}| ttZD"qOn4 u2i6Ү-N{ nrgBVH^9nKV\z':ȧl!PT*tO Mk5f=NB:%>׀ϘGQxw++rEyVgSн_}Ug&>;2m^Mxw=A@(΋}±KP`CWS= ⣉NB@-6xs&4GJmJ8؝VnCc|M*曼Ʀ_Tmk[.nUHm$Nҵw, H"}|mVVp[䃂ΓUdlݯMS/y?>lt"WOl~rӑ֛H6—cn(h{ ;H?m l.^i1nr>&o.|4k# Rqu V%[wv3Od1S{Gm}Giz D!'jzgJ!Md5:Bz[q|Ǝ F16Jnꢐ|Qc缜]I~6拼{.Ы=d qn^Īgҋjؼy"?s #LңDR2em8NԪdƻH ?yI΀Np`y0^Pe@sW=??S&ŁL;gkCҸ&zyfGu [A)xy>``+k{1DUmsЈ ˛0])Gg2WpYFnD,8'p8SuBo}W 2kF.r`0gAU2$Re^#|]s` Y-~'ڍ{v]n- 3uS[d=[6˰*5,zl²z$2Q:Go ~DGb7c HLŷV jt/igu5f}N>(TYBu[۴;A7?kNĘPu뮔`#w3QԖP}x/L6@_\n5P/sX"-ETrr,έp`8^qP^dAjν3g+h됌ǁ䖸ը-"5[a2>+e65*cއR6E43;H:պṃ"L?ۑ[A(0ѴL 3_h`ߵ V6&Ùz,,DTUKm`=%iNE6~@ k@aHc)]6lBN: -q2+D5GqOmK:aєNr ׽cu|5@S<8mWòd s MuEֶۮx(Vr+ɏ_JAar˾>(i龢"i8s7]!d&䶬*?X~i ހּi6L9|tȚr7٘IZѧ>uiz99I6= f1 ko6*_́a(v s-.Z׭nC_ǐ(Du㈮ >qZD}/UWH8Cs+z䟦Fǜ/Vaz3l$ŭ=Aʦe,M)CȨG6>Ƅ FES 2fa`5/_S,& ˵}I|a my)I_L|:p7n`~ֻ5g]5F}DgOp?(ws )"%*_`;|tAoBhR5[$FǒUq Ӌ; i4 kaw_=Kݡw8 U0|BK*Ond>SgW;D1ƫ#4~QgnH_!ZA5;K W e@'׆:%Ld}Ma)j6_>-4q5ڒzիmH4%J`'|N231i:-㶱'ߘ:~j.i.p#ItXGb {1soQYfH0z;g(O_x1 =N8䯏AaR>>sP*XR`Υd->#ִ $N͏X {.&ȒMV?[x^z~ II9-n'OY-w XV7m٧@/G6Sw[l^p?8Z`D#+ÇG;e6Jz'pd&-kLTn7۶A r/D1 5(Z`Yp)|όAdhe:5?ߑP#Ɯ2f)t .ĂZ cDFrnY#qcbehԣwmy ivgXʵ &1G^f@ |3BcIa-BP8d؄j 4lZRmZ ATD $AzdI#D1U:PGNf%/^F~w}vR1Ql=vYu-:<]cu8ӧU4խ _$=oY͒(E"#G'@BUZAx R3tι^M.6sS,&([rOsOv4/__fFЭbu5ַo ̿kUN*mƯM:'uܐe^QᢴIMzE=#< < p,7ֶl*D0z6>,HjPsaL 5 FFgͿ%'wv6`>k 8Mg6xh9w:nW0 
4?r|l h^6RKԼOJ}ŏGD.X`k#l_e^Ppp%f>q\ZebbSюuK_MM2O%5'NT>?y?YTm =`O h8*cp4sK}Uf7ѩF]e2T]VQ 9S-w9̂JE۹| ..d>,{jF0 &"ED3q /Omp Ϗ^D#p#}LUM<%Dj\[q2nB q=V8?Tneg?rhYp}f`zCd63|,j? U/k=pɀ㚆Պbb! ppEP Jׯ9k i[@ƒ{`/.n‹!so:FGF;Ϊ:Q~ZJLCW"1&bjN ^`z:% 79q)074Ռ%ߤn$y?iԩtQiY/:5\=b;:nGʁ-bUy4EHKX^h<AޭC`^5O JuXԲ $" %!FuGz㗞,݊Te7D[N/EF>T^8t2.e6IF3oar0У0ST$7.Bg |Bϱ>?sƺT1Îg5ܨm k5*NRx6yB*s VyP.cC,UG<ৈ 7#Q}R nEl&Ex`3a7),)]MH3+g&9l(7 t01zpR jEIx_1JznB*v_t!*?*(CzVyKG0Y@D#>L3vT3 UKM \zXy?+{OW/<l!Cf LeI IWz :ʷ^Z?Ĭo_O |Ыε8yE4'O3CJ"HSz2Ea׃ vlN+'{7)Be qfǼsWhA1%c0ꗛտ]nR*KNvPL@slTtCC~2`ZB ^P9uvټTZhyk d/:5}olSϲ͊$Hl۰OOJYICVIT&=[j؋o~sƗ62t&eaTPP DJdXasEWi*uc La~YV֩OIApo 5rMU2i_"o .ŀp^v Z\0n5qx@ ɭZlAaCFZi :@2(TX6x10͍M-lIZT+Ez=Mp>N &8VF<,nNiJ8*'Ap,2H ( ޏlrM br0"%/Rnl ՙ"ӓa| qϽQʁn/e?{{*ow8ae ڳZwO0(]!!8t [π\,hsc>FmM kƧrIDN `d;!7gТ~ov)WeHr~5o=+lФQGdb(2T]`axbL`4; LFXQ餗ZoYl_ﳇ ߯F%.*EԞb"kI?idj-].ߟecӺЪKI~ly$ al)qyz$n)]TLoPSîވgG3u /i9 h:Xv0/PLA2PтT9X-` g`mýB6גel*mi/_RLa͡/6p\DOrO' C*Pd,`$ _WZ;N߽Qpby-.@2Dc|fI͉?>b9iej< @xf-t0߭9_?$yakz7ad[k/>!r=X7(D#w_g=m('qP+aQtONkۆB6hog4=8ձ/h=TNBQDǔV_O3,:+Ro$>u櫸dSP/i9d*q5,HeN/&E߸![gm\<:,/ ^xbyxY]Z:p扫viwk%zs %@6q(%&vʰuju!8j|[ʬ7.~Sg&ͫh? 11>v\ 7vOZJ9!`],BO$ַsgRR|+e;c|}%.% |<2ѯtJKk_ߴH6qv$0^67X cחZjiw2ógcZaL  LH(4R{۳|?istvQp'>q^B0 hvZ_8.;^*|7Cx'6:bph̾ڶq5Yoo cΕgtH^ w>NdNK 1]&1H؍.I:Z,@ p*wC䘡3΅U dLSW]D [-0v ?/Ө$Xqqhb+.h\ joNU:)% -CU1v]7pFrq'$lSa) ~/]2I` 6$ Jt%f>[*=֊(l$$YNX_Ɔ,2SfKt=Nt^8rT\ :;1Idp; r%I4~~'9 I]s ':>,AOL|)\7>k,ҙ ~JU]{te(U5fH ?=}m8TU'PSOv`泀[~xa(gUg(H&>6oAmJJ>efKymJ^9-:.(Uk쓵 e gW14p$}g°g<]xSaHrJgp !/9,J4pHj_7Bz1eHj@0 iiˇ/g=^oP:Τ"SP~5bM@tO#?]"Է;H'B#yN x2ISRm1?L_B : |gEƱ֣ F+]:Fbu]n'!U+׶wCĨ.3-ܾ-cEm7*` /)#Ik;A@oX}ܧR~B fhg}J'$;|9E_(e]a*'ܞa#v G+C D=39ǧ7톽Y7Kp|tB]SF%mS|6' @I,pP|Izuh%bV] d`/L_K8 F;{t  ~&.11l^l"W1xCDOWAl-:Ѱ. 66o)}6L[K_S!l\-=1ӆ6o\9$9NEVI_{ @Uc'VF8&rlVɠ|Cyz1QҜ[+FP!lxF}B(feB}5`W_EWkGY p{G}6qGY %Zw )9GTmPO.H4>)&Vg>vxk&eV@Oz^^Xd`U&_]b VŅ;4ގ;1lJ4jݙ_?nܡ) >{ަo1HPD'sK=c4NL&DKNiƇx,ohdL_%}2P-i3 ߨ>󖳊[. 
f]ak;Z~(Q5JZ2'Κcݶl3jhtULQe(X'ǔViIw%%٪oZPh5lR@Jn1V sYѬvg!9m&D.PO%Ǡl=SʒF4_/kWG˲Dy~XcnG &R!Mh֬d_7# Ka _4Ca\%gp)xΘN8?Wr$l8nRm 70o6eH OѝfH鄹mס0:Ԥ3z])v6;MC}sQھ?4\@8V E,J.dR듪Osr0(^Ӹe)=\{@ufS6ګ%޹]26pG[_EâJlҏr,W]3)yu$e\2.2Z5ξj39X=f 8 ƒ+ۖ.=⦼_lM+X5})X?P9K_ ,㻈1̾RK5QCIS&>6W6Y)ļ3-T|i.&*m#J @[rmR%'k(55 #L?Hhj0L]CcQ2z\=3XKc5tMo}VTxp|'BzMst=MO @vz v6\>>XX82Bb-#`G3hz[X5ht2|dujaxwF⇨nv"&y;PV}1-8Y˘u8|͋=$ Oaal9:buzam4Z|-Nve`dٟȰdDg27Njcdy~~XBxdmB mՈЦd Q0ҏk*+z]Nol"ڽn(~z ʟrV (Џ5;GYI;hU.DC9 BK9OL7:/IVww@MfkKA6|5 .k˟Qۉ T]֋ U$؛OuDi XiR7IC<.p?p,w-?%qW憃c9`1/ZZ%"̅z\iD0S'!ܨ\ (58p-\dA7{;)uҀ8pQ| N+|W:#]-y-O_A5zRwp)TX]Xx5Id?_pLl\ؖZҺl@ר7cR) Mщ1r#e^I MVL2K:ݫ515a&lxYA=FKEGNm6t^:fk*s,u1W$0u .u)Fh3`29vM}cFR*@b{kG`=+ @<~6 p?h>_2?'SmH\`o|F *T]54,0F L9}|y| ܉8Joa "I!6=mvJ䒴 Dv^2!Kؾ#`2{ 𫑸|j(J`d{Q ]ᚿi|zgG_qDaQUՔ:h7$m5i?yȂ,1qRf:+![]}D뾲]`S}SPц(9Eq)Nz,*'壺zڨTPg͆ CނSDOhgMυuH"rяlziN_W)Uߺ/`&X^Y 0Um]xw⻓ Iגs=cy6d@?(qoqִ]WH=9aӕ ݿHIC? Z,š||0+Uu-ʧqQ1ȁ!)TD*럮[!ͪVwe/pE\.18YI \Lby^ P*)Qr4bhϨ0;S:x8c<!vGf뢅AE!^g項@dsmsU) y⊚[ |ؒ hPF1b$C"\-x3gй=%ʈŊ G# u2y,J_b),eN9QS>Pg}-=qȘ]v x2C8ڷnGN "1E\֮0Ⱦvg`2՞Sn[㘸*}"3E: ^a1a, q|~9uI{}'xͯjD0'WOSQ.oP]%/0/~Hq͗i02 P:"kTcz2$nĜ !Dpi9e.n5$M﬜v[*mPѲ y%E= +F mk3vgdjވ%% *~-_CKV͎`1t4^WFF^,:FExcDƪ<22mSZ+[P&% F *u|H5ViN*j=ea* 9=Ho~ Bʓa1l날NE W_zI 7/?B!VF'8). 
aS\Fq*/ ᅳ"+]yBg!">-yUQu,K(;+] c/NT^GtYJA䒜M8IS|LF},r'ZC;t>$NҴ/a焿aY7)ڙ0?/K@#-~ Iɕʄ0Ҫ#GF%U <gs)!/+P8O@AVX0 %5h!.3jbqk6zjokuABD?FW`h&2ߪ`TU '!r\˷C~L)%p6|6ϙ1nfA7໰o+l;g; FOF>SO;bhf)ܾcы1ײ0'nڲ]cYe4БE-Ws/DR۾34,˼HzuC;!V7 ,^j& M73 O{TNg>ɕ+Nt"\1D|\9cCW-vM`<80F"a% 1uFu4N/pp;ɮe:R I1=Rz̤C J㣑pDA6YE:VTIlv]mVz:+۴.2ءrǧy2' U}bbdr:3ʫƯ!1%&#ш{D0HD wU8Z4$qoAڞحxNU6S ~LXdr.y6O]g/DW'4ǫ^%~y쁍"rҊtDZt yܓWu`-_$ѿ5`|, xwF箱ҙXӗ-/X0ȰC2w8Y:k؇Ѡ<#9 LqE`&fڂ3g@px5<+U  J''=s8Q쟳:\?h9*Pݍf/zD ŽP a4߉-M҂ TBT U&zP_ri<6P'c֏Qu`N]YaWI眦U;kʆrOk,"N<7 /G]S09k_KSm.QZg(Td͜Cie_,Pgns cq_!;oê 8V^ Ua@:S v AP; t]LfQZdem0eb8Ze-|ExYcӉq(% _}㕴v.$RID4{U2s-Co@Lߪ$raY?=J u8cхe?goruN ʉgwHNmn`g'Z4pw203 㭙|[،9is:5PK{ n#38 Fu/D=LBy3V[~66eW ʄڋaJKk $~:FHҶYb dc 5Fk!NǾ_kuexڶƧ32&atOTdtMy`b?@v&|)̠Ͻd%& L)Mwr7qє"elk-9Nc;9ylDo{ތ^tљihM mt.RAv) "OeY6.Oy)ht (!LTcFZ咦ytb Jg懞(zacB@3f SӲ 9~CsQb erR0|Y&_RYLtA2K!o{2K/9VX2e?6XΊ1 ;iv +!ə;.ux0hI[= sbJ4U.^jx ˥re1g=gî8~Ɩl@=<|{Cɋ6fjM7o(zBh;•XONʘmXϾ b .GM8ӥ ND <`aT?)ҼWؼ:dW>@Nw>ɥYոx'{qvHY1uWq nK5\QL2(}#M&=<FLGʒA73N%4 5 E>XTqjuN*D<@o(qչ80}v{HNgMW 8I(D㧏S4LB(}|v= p 8{QAfRh$4|!Á'Uvl̅+'v{15ҲȀܢ!GN$$Z^_97"ȉڿ D_:bϔezϻ;9r/i._Fo /X+5f "nL6-&=LUVd Yeݒ W06^TvnQ!*-]u7۱D]}osM%l519p¼9gǯቤa+ug dVt6IbqZuNgމ/F'4Uw>,Q9M^׶L-8ErYAoF".,HlI`yCYwkV=:Z3avtz$"'@dҼP!Zۊsww|",~ڥ9'VHbX:ۡ6%ʶ 򶧞a|ü93OSo>LAZ.@[%ܲ8<DzyHK\i#O#0=Hnt~o~E{6WD^lzsaYת~=4J 0QbjN,H_[R'}n |8)i&(2[hZkw%\ZƥMˍ,$dRχof.uE;_ŀh*'ٶ(SوNz%}phy8O9'yiڮIV_>Oh(dk^:[H~ob%#E[Chm0mtzΏbw6ATy PwU $S Α8||\}! Q+i/r<C?Vo hj) :8<ͮh^/{s*jYme` 1˰ _ʖ G :fgˏֳ Ja߫R4.g{]auôX&9sN=SeNQݑ=R\rٳm~n?(IT^^\R1&pZ H- [N1c]tEg5m- ?\;͊:*kmNu]ӱ #m{[-c(8n=+yGΉ$eSȾʢRqk@oh1<5.,V/JNW+fynHմr:B;Wxj7s'<˯ yJ'*,!)tzWj̖Js<\lkJVTUa%AE_PLĚܳ 80Q9F較hC+rÉqVM,7 =DbxOLo3ћ 8MşMfT|Uj;jdQM$|T5p_$Eݡ{3Fq_fn1WLQ\ DPΠ^x/,/r8yN . 
=YϪQ"4㕞9?.|A &uIwalRwa~d~PgVL|v3 *ѽNjxįN?%()vVvO|i`;ޙ=|@I JK0Џ+mnZd._{g<|ooLIRX YXjnRHy^?{2'(.2pxrY)= a'$JŤ85'6)I׍ Ɂ4JHCumt-meT؄߽&C2sgNDwcJf؇A*'i(Hn!+df:݃aq>{msR.7J"o.B#aH=){8ݐto&2~ޓ^d/y.y]׃IJ ⾅ܩ[I܏!* ޸Ibm,5R(Ms,]}GbQ yZp`z.h`»}~\ G!+Bm3M^1 ݖfE/"p OmDc sx 7v';Y5wBHħ'w;$ Q/`Oy~=~re^;a&!0!-2'8ﲢJ@$qY0ZT4 @dD.:[m頜2Cm@͘/U("Gη tl'K\GYtS#6)ը[  'XGl?D% 9m'ص%%&_ x.TtpIY>Hx8kА0Zuo{t_%SQC1&q!(ou5a bv94LW>RTPmm;a ~Bĭ%D +*/ DyB-"f~AZHc "57Bۏ.P$Vg>jMbO* SZW( 82 CFMΦr@*z3%GT&Oݸq#gZ(`˥w4ʓSc--ij,ja*$ y':?& d Iz4*Ԏ۩ xF\?|]Dル U?Y==Lݥ qJb`XTCo{yEIVn\R{ܣŝ6h[dXҳk%`ZGRR}_]d뾎-NO`,'"6ZN3-*7k΁vr[p9̑bu+4EIy3(b>F74j{i^{e' v9rBu@VB4"K҉I*vw]{,{|mk 0)nVz#6- E-.{[BNSk2X{ hvu]o{@-:ŧaO,tg罉?+Klf"Yl]bo}Q 4wێG<''Q}:c{/:);." hܤi <8У7qܹ?T2ڗ{HJU r iu /0X?;1Xgqgx@:, fN'kpӻb*iRy մv !}lNo:v2w{JruM2a!8s^I2C9%ֈ;8$@/Op9GYn~i)gm]piLL%yP4kE;chA'RJnǙ\tWc\p׸.y,aIL {4ʆ:ʫ(ljf5Q iຠ"B|`akTd APQm*hO>ĖP/ t&~R67iDgToDXkZq==nWRȱ*a{MKZ  "]Bpj, vq/X{I|f(hl> #]`/i1Xta8MNmFkJ*9ϴÔk9, :i\wB8萪7ALUG'.QbQ6b|b!6`j,-͟T {m_"7 E 윓_cV9Î9'eӘJ釙1L=Q.;b+koxV 6ٖR-mUHZRW!0P[Q[F"+_BJҪ1funŖ 3YeMYlŃ?ҟl[9}61(5=\tybp>W򷈾\`!c'\o1lɿw8X)iq9-IH hu1}2U*w@p %nLv҉W1`4uyE5!pxjZ5(nCMXQK4̫#r *qɖC;ki9 ㏩dbXP4Hc&V\0|NTHV34#ۼ@ ݚ&8hzԁvxrBVγ6oD/;A#~'5h)eݍ vj;E⛍_^+Ro{651 QcSl9Q[+brp-p-TpLn1Ya(:qI~>hMVN 1@·pM{5Rpn[( PS7` mUte: _xSg4uG@Vឪ328Ef+P~^S-zQ8 hn5{wl>> ."ǀh0kB3|q\?髁sfR2|*,fکfc6? M$GR"?u|,g[ qn$:͡kL5k U鐗Nj$WS4Ǔx^'l7Wku<״J5lHzmR(|Pn|"J5ܺkbrTTG*I, UĘmpbvԞ`a=mK`=tW_XX&4; +kt"Ҟ~G BǾPY 7,0bpk,ƧiTӼa}P4:y3&y}!"A:K?371kJ7ZB j,bR#?>yWρ!aA2$-e@GO8S`Q@YP:J֜yFܤxm4AOZvT$fZdUlMCY>l ܂Nq2t,YM{Zuڷצ&h~ڻډ%:GЄþ}s-WfqLnP ٘eV8q #谌e*:>6)纱a0g {(Ł xӂ.b͹$SX3ΘFd-.1wU |3 p͖"׶Πo}] V;<ѧGj9z|pi 68ܹU^ [=~밼֟+T0qe K>5qtqz~nʖ/;]E7JS=w<%C:$HSo<QY9.x=# |Drԛ #f Xv4fr dǻ`u":6o-}&iN}&l_w8`ٴɕ$bL^< x.R@=I.i+w~|*|6eJlCE\-9y܌J$5(sh;tvF*IJc)puRy}=f=cQrp!׀nsȇi-Մ´+͘o40<~{|s$hz. 
f$)MabNi;- >k\0|#NKm}Kl#]Gې q6O}5Ki^o\k KkWNaXO}UpqMc"̆3X/Fo& [8Na\#EIhIuŻ` $|"}/U]k.a0?l돠wwd ^wDCz_n 텄+6?\>_6dB~]H#U6=>yw]=`Ĺ$h 7:2Ig;褗ayajJ}+PE2'<8:d(AR(အ DnRfjۣI"Hа>O)PLfD i55?͞/DVUXqw#LңH]Qv k |]-?UI= gQ @%9*zEG"S;/eQlx0CQvLo Y#Wࡏґ; aJ訁Cw^\Z-e gY;Z7rwU|zsVOF+՟ti;0c^ve0ﰇmeSCg]@ҏ0୺cI E?G+Ɖj,wҭ?qq3;iE>Y?zN@]#򵃬>YŸFȥ/+OC11wW"ˮ^ AUA9ZoYU_Xà5鯖Wn+=Vf$E 0*L{Rnj|Lx.`[-5 7m`]0.NW`=>WІvA:i½i$<\[s  cbd瀌;7/(!!";Sn}8Tlڗ-!'yAVN .l(T=v!y0=9|k-c/ӛ/4ŇRo!fzg'2eZx=+}jn,Fi=a#=lfZJ7F\a'񾊢|U,*U㋣=n4@|i_>'V_]~Et1P3:q90[UkۿS(im~M Z%ۏ]K{3o@]e A{`u5jݝc͓]Q؋'vƴs }+a^X՞`, &"D8\| MZw 2dL`M}/*mO<) fF0`Ѕ5#h­ѫ :_5 C;[c}UB)SڴsGRˈJ)Wg1mq"#V Qq%XK [͔x/ir Z+T<+ص!VeoJi_Ml%Y,-f=yvUz\̒X<)aS'uW\)3yI8\6OER^/l WtPϢYkК,ZXcv=s7/a/ivG]=ˁAء;O &C,@R|zU Ui.ntEJ(@5=u.\LuY|.KP}| T+,\/MiTpFf"j[KJi)1S%?pIP*<@q|RZ)>uu盛%`uxurOaQydxG:ܡw6G?;H5ѣ*F[LdBi;)ʙDD1Լ}uf>͇֬YDsWNeN*}ߖkS\QDPa$|">h@B ! Qq52e!UXřxi֏ `s65jnNHgf?p)ܽiN%K2Ajz"~VrdD/Se*hu_;+[>EAb}By!eq|9󾜤;mfZ Q9 KIzi%ifG4qr3W&n<$LǴ3/8SCzγP(4~Lʺ@ppQlF 2RDbM]Q%s;m(3Rf ~5hY9'W>=ޮX [v>B6-dz0L©dz3M`"l䄛&2h\沇6?? 0lx+LYת2Oz(r1٬u%ꉩR'͔lL5/mC8Eѕ9v@3F`K]Em8 ˀ&.PlMf9ߏ~/)Wx>Ez(uИڦ@Ԡ{4I6~\&l@Ɩ@CNYEߍſ`G2 ']d֚U j%AzS0!iC1^)(08!.EZ, "O;cz`%S=(x'gF/sh{~,(+l~L{w+<2scuTN (wV_H89ւ1.2C~jHqspts 4@dZݪlތM#8R=HlbxYiIpvQCuT䳬3W͐n`pN=F|iHX\Mדp*„*i0>3j#K=j.mj `5 .׻.pn,6(my||~*TbȚf4?J&xsLP)\E=6o2S- `p N3xIeFUP@ʠ'jȓARp@ۼ\58v?ȿR͋/L-9Ç*cWz\U쒘\n+UŽA $jOb&ADQZ bC.wduk#雐{7x]8AO"l^݋\e+6Amރ݆bb\ZI1ex!sI:wlJ&?]knQpqJH~xFA-U{u{ Ҟhډ4Q/;<5*Z/j yxh_';E|0&5WB<O =cq&A]=D-Q,>} _ekڛQl=ULq6CpP?"(ݍX/(J]@Qa^q&/2gTp,. 
+z{>i[tI*,[gb8'g NVIYW:t/;Zz<]Ϊo~/ǜ5NAT 77;olGB:yDii5ӬKl¤oehYNοmy+ZuKFD8<29T) GKݓK\$@@.-8bu\$-fC0)ҝ$Z4wpLŧjxjSK3-'Uh:B*(eﬡ xm7]n*( $~jJ/(F`L5˥'Lv gN^c.׹U3p'HW)~΍#tPrȝu6e 6Y_0} b2`7k301 4J-م,Z W<]:|[TP} {>F 9Wd4Gn%zV 4u-ȼRѵ^zCRCAwk5#k>^UtRg&`ug\je6Ton]9)Reկx?v!~k~f&dx7i%,eɆGgN .:z!=)}w;97ދheY8jGTU%)MevZt!0Y jڥAq8]Ţ҈.8x RpÆ [%|7PnC'LfwDT9 #Vdn"gJbJ,9" PM#r+9מnsiF_Y +/m( +-Q2CjAֻ[ /1_R~+!˯yM&`R G=yOcA{hm2}HSk6jK4FD%Ď|nY$ߟq[]^pV4Uúp-D}MIs" !įMCР #U=#ḾA.xCذWQ7R7s3a|*OA+x|+IM7J'EՉi1bnW\f.*G_FA~Ь&/WRdSTg:hN~8 1gp(͇k|1޴h*_w@cK*O;؋ )%*173DSϖGިܒ V^xDEQTB3`9,\h/h"d4 _΅ % Z4s%uR<[K-niwㅌôƦLۂ8zfozDLD`qox QkIk[KΩy&X1 •&[^NJƦXȯiň#9/Ѣ۵qOPWy^;Fk$l$aR+:D.؄کrR\smML(Z{1r0ܺaEe8v}2 sx&qGy">K;j!dvfLై-QOɲ/20fX*BMn]{; A=Iva-4&U,w*OKi 0"#(CÆVz6(S1[FIw:x0r gl?EvjeoR%`ޠ[]&ȡ"0t`O /wV u,(w>qf?տw0c~6 RY~NSbɀߔGp2LƜ|E*ͺ\!uAַX5~+M4Pףn%{b}IK\sŝ%Y!>`U&22 φ8}f&OdGlH̀\}gؠ"IzR =' d2"s*ݥΪD6F®+%R*uh}x=$jS_׺ޱ)9'7{Ζ , ;לr EjNy9a?oK ]ac>$[b>9q7F{8bc"K0#P8" 7CD,g̑E׈Skުp+ߡc͞ ;@gQ̾&h[< ofeTO B&ރ0&@tKǠnҋ QDLxxD09ޙTNU*$B}JG! :e'AxbX(jPgCU,Y.0W~.2SEr"7%ڌ{(Q}%lM;NM !ÈfN#TZ>gDȼdu1|aG.*볌MCg^a煮cuweT7I a-nn߱:Vbأ:53\C5^LJ/z;r?ֲa 5b˔b8yqm똉gʷ;ia*I–/nmA,1K[2zĄmJE*Y1M2:0GB ,)pUI_|c . ,`jEvwQ+-Jù57R.ȝ.Ih۝|]C{P=|Ƌ ;B rN,bD v J b#Iؾ;Dh` Ia~zʓR:pxΙ^5&V5DZ,;q 3"odČu-ReFi!(%FPk encrWsnhF)E@ٱc=VŽN F;GqQgф)A$m,%w2 ^hLphհZ*jOyay}!$UhjlnCح=m/gJ.7GLdHEr8bc9E$\!RrXOMQ"V ofKJ&:ŸD vlj2WӠ?+qR6Mg.D؋t #}D7AM,@,=2Xiqh@(S x/mDci,V8Q%O ×,%˩f>3!u%]獯#%3Kpɦ(p@0 g=S\n F'{o? 
?H+ܰ0̵eׅdN'D5u;rݹfiL 줠]'Zmz AN~ VIOR ] ?iNXl8b[R{Ē FQ|f=Pc%ݑQ!řR}'XC"o ,Sk2eXa%@ lQa{e!!U1_₷~Skpr'dvM= T|dˢvۜDq^DSrk5l7Ea7LA[Wo./wbكfл#*|e@\TR)YR Dnv!/y< 5}"Q`ђV& Јq;\ iV˯KBeͩ450P-rJ&N籽HJƿA'd؎yUW@S⋹vz#WuBX\Cq'.'8}J؇8U{e A K~° XrӜyo@,K8LxjY65 KfHp图D%h8qJqΞ 9 $A|iø2gx\^ku`6 BuxE AYo632Kܮ ΆuiݾSH{ΜxxmWy!ٵљ59V+9=Vw>" u] 3Y)`4GV bXшq"CQEl1Վ?b@Pi#j sTbi($;̰ } 6×&x)AH:W>{{ `daNNZ?j&lAS^NAtqR :16% jI,*=>wY|r4̃0wsE:u"clKy;+~ %h Kv3215MќcdβW7|7oާ OˢZ?1rnzsP9;xZ|9ۘIH9![~wT{ e?p$U16=lzGDI~nyr'GQM* _M1mN>Ta{rZrÿX1b^, yy1R|zFƖo=o͛_&~r JxpJ_^+^Л($:iTI<%RҸ0`$י6Gs`h܄y Q& s9f!ejX(D<B=:{tf/5ǚgz IZ:$G6R3--,ꔀ24j6>o*C @l Tz[2Dʮ-rY; ;k\?=`G@WA.W:iD@#Ix%2*`VȂ"{4~p;e=ڞ]jò*Cj/G儝oe놻EHhZ0r(]Kd Oz] FWEn-UfI3]9~Gx1#!sG*px͚t[މmV5{jvc)s/"93WZCGtnka=dKuɐBLE5_-UO[ A$AOJX)GIaKbK~ ψ`+4ߟ-nm1Ulxpd>X)Ƣ5I;EM^.DOf !T[i79SϮzdo6Z$`]S0Gp)hf9gøi\o&'V G3n}bm>E(ME#Gsok>a1rFTF&5ntjdPb՞ ȘLD\'}LКX鯮ϸdJ'ݮU1ףpӍ/-_8 д'5SAd '|?&8~L2xrZBX,S*J&_ )"iNCF٩PDLvWi͸"krFobC{_5'$Kҍ &6Ϥ'IQNȠ}%3CE+ɕ})z <gM>?n^|_#_EfF8 ?-Q#[继w7ȼ$tnwINخ3TZY뙍Bh_ZZ\|?3yWZuS\?3 B0MZ0XJ%ȇ8ߏ-㎘r+U %xOTP $nڅV o.S*Mq6 '>Ol*75%h|TyN fbWH%jU>eh{ WHPWlGCļjd,yR_ʔƣsG Ip"op_yHe-eD2A0K:[蕐%/B#l5^Z6ы!/K8%d^E?@z^! jp$փwOۉH¹ +ҎꝓВ ㅋ5' 檔z'[[g)t9.80,3]n D9&vJ%s;_">;I"8FgĪ`7aaCcLt(v*wCSb:?Сhtu%lids$$v&ҍBG;lmb;!f`$_i7ڿͱ7{1̐ꒉ9fVS';I./,| Y' 0p:C, <ڰwp]\iB8#.M%;2Be"˾T <:4qb"E=2Ua@:IE$Ylމ%$uz! 
&G/2T[|ٿH*8h#ZӨa+(y8}5"KbauiRq?6ʃ\gst$Xp+2*B TH`fX$d"(UođWkmj).!F>,V=Am&hJ,H n""ڗto`pjO%x~Ns;T-|ͨrTyzJTH0Y2{49ltrVk&.y<~Q$sS -qyS0Y,o.c.!_`*^^cN-Y*{xUA DUK=]Y(MKO\޼uy7|eУ0@OMz>a_\zgH{Ƃy!;9O0A݆vWjOo=$԰xjU ψ1b>euTwQ{PE)ft)c|k[MEx/Yb]:7/ DV=>H+E)6 !dz.hmeH|Gu6 $觜2۵;X&Ш'ģu aE/ Ýǚ7m 3x2(fFO LM!o&wXW+;KO4FQhkf" ꇨBإnX_aC + Ra{/G`7 T'ksfLKr+NOFv}ȼ:uCą @F{פcA_J/{o)b@~y-GU3NJS&nCAJS 7Ib*0KHJle҇X~( n#i3; '/=aF~a1דō ӰǗ9s5D瑍zS)m5[rWW$*d(+A'*}ՠR!ƾVxK"2zG?v c}]qw\5N"Z(hHȩCIH.Ik9(!YVoV5ם;7=[- ٻ()b }_xz$YNU$ 7*A)z5(wܛ&yvt`*/°AS=EW9}J:Oas T-}<}t%E+JjPַ:J,YeiMT6z#01;F~oEֿ-m|P!n2)^?l^^($C{u{ت(]Pb#؞%m, s9U_ϕuQ֠<K}ĄR}sNˢMf6JQpqoũVt<`rԩLA>6D=U+_>8J/kuyg %4ܰ{O4\ې6Wtz7fZOF (-i 6/Y]/¬bOvwnztYPmn%Omc$z|AjŎHKˠp3:K T#Z &u*%InKY*K9OGG2JLhM=mtk4XtT/֡W$9׺<2,0ʎ2Rm[/:s# . mn Yy2X:ˉGgm 3o=DUB|WJ3~@U +s{q1,$w{zQ&Dž֝Ġ}$)9cO14 {nykzMڹ.ZX`h`R^#]ㅚX 'Q)jʦWC`Y>נ'>#"WMA߫*e$N??V' T)bf'}11tmrORI 5,04OeASNuӍr$2蚶㧍s^=@F!9˙spW\lMݎ!֡JMASO.Y1. fӈ⛢O _r@AWyA|_8'8? c1H#m>2Xi< 0Jxd-;[ݣ4qg'vPV|EODmgQ__8B-e~ ,--/MRޘ;A)eb#FY*p}D mY4*l1j7ӂ% Nzs@,eSϱL*r 7 ɾ$c1].![gjSo mxO$KiH %i_CX9蠖 ^arܿ`w0b8cp;ޚ^OVaJ;; ؋Xc,pYut@"Kڋ;KO _d;AFt|ʼUM77Ckףl:Qhh6 n i_*NuIŇJ|3آϥ1.4+2PD)FL-*k|s1_ʴB2/H+bk,l029i+m,|\+] M>3&p9lbVlwz>}P1@Z\ f>vǘy}C /F,!_x8Rn3IFZoQJst }*xndCӾ7O!]_"8Md/O}xy!lxk@@u] :5}` ΁z=@9IKqAFT]*&쀋^!$܉1)A˲-z; Dz Sv\kO eEwq:4Sn1"FtW‘@"bH?J=0\.=RHoeO5(&3RQ\*4 R{+km2RZPŶ=\S8]Mx9hN@•_pD-~V00GƟG?xƅuXi@O͑]Ӳr\,u gGȘz06sy?>3ɪ΍3μ":pԨbc[(V(pAMۏ-0\ADGo0cFe,ZNc O;q㾞2 tg6kEgol1]6 ~c|t n6픾ɧJ/wG $x9]}Y DžЫ2- @2guUy`EޕHv/FUkNѱ:{$قGG}@Zo$ E{nQ|k Uz^z ސaU4T"p7(GXǥ(GR;r7Q֑At<\M ҧ1Rhse0Ĭi[^ߪY}QOsM ?PL}X+__pMKp:۫wt~ M̻WOP3%`]a<㋜nD Xj8 r٦ofDт- a"A Fh\ӑu݅K4~@#Yt)!({G.Wm+euM( yX _(~xG]<%͈J2!0Yz %Gz4̄'m XIou4+\^i?i{E ً3Mq϶q 58dPTML!c!TaY K;U}󍊀^6_D7v=hCX L 9)@ &Hu:M*aM>u4U,L҂)dx"2I~X x @#dMܕVt8ˊä 5fu$4 iz% dkylaxF9%D힫 U"}.b@Z33ެPDTCkuJHiU:Uי4619J9d8F/p)\,p؟_X}Mef6t\DAf.7wEEEޒy".JEգCf4dF*Pd`xR/lL㫡 d zmcD-!s='t*[>~羣 *,Ŋ[P;,sxI*0B?+]-}޽ ǧ[A!0Lħ1U+K+^6U<샖uD: bea0QV Yb-A?Lt Cq+{-iW§H)Ln0 WM Ǔ{Qӝ tWa9*% ӜHsfؽ)N֟Zo/t˒5{*fG%l %z7ZCZlKvf &H_~yJ 
:aP$piE$K|ƺ՜9"$ftM(Sy]֗y1߾ง8euCsOd- H PI,e> Ly]|W' K@ZQzޚ;ì Q-J 'z@=i C2\b0> A,mh@6S%0%KwldpFBC1i\OX]X~H-z"@O}Jk\~OH!{c wD f bb(^-nṀM7ۣ֚ڪRNuz  qr 1&xD19k)y\Ɛy>*&}Hѵ f&]3H%w`t-4ڧB?gJzbP'ɼCaP&%tL;S;OR+tAD`Kid +Edh<#[@z =tb$ՠƢ|NJ'd~#-#"?{^7Ѻ:Cky`9#OY}GPn S2 +HzmЬZi[ u'k_7) ^@ynJ]h6|PA7p7^|ɃeӂTh0 B,u¬ t7HfS'RIkDܙW `HVKL83;Yiv 6^W(n%}4[,60QDC4BdVٞQ sV)rCހ,n*&NMVN@Z+D(*4˖PI"ma> zSA&uvdzKF&d^*gYES[^O rXcN5ŧ8N@civB<eQZўt X(jy;OX`T~Zw&ޙ'5 }V#J%fmr=EG'$ȎAq+A~ SY y!Rt8u7U\uПuj&ABV͈>U+ݴ޿!~ rMG%#܀X4L@)72~OST=䕯aU?Tt;\OxnD!\@A*hMf3uآN^PEA971\D(T(7m*r(aoEfZ(D k}r'd'>צ-VVrׁV0 BUa WEf^jFgMGWoŞ?Ap6iTFqw'TC (wmB' &}Dc?YgqKkke'ߟ>oA&HOBBpH%]:$,Fg\mKgr=6=%Hz!$]J*k^rX~`jlG3*!p2o̖=Y)訊5B:鹆 ҍyiПi4wz9zmX/ %!řT dɚX ,XDB[&9ШQ.kKݻ%ύFz\\8P#%tĊ#UA>jY\ʂI:z{bP%Nua-KMoqwMFBh@N'_@K=:$l\xhXWZ[E9h'x5P=/< Hic"rP>drK4QK##n\B.Lj8;iJڞ1u>s?=0޴ajy?XY [-W5|3G7X\Se$obC7{YeMq=l#%HDePqvdwv XtH],M))JaeQ$ pmkuE)UUk~%2dL]CFM5=bL~`ڞx~>7ȗo4qӰ1ʮz3(cм_ ձ64fJ癤OZ\ŵƔ ^8M3@ڛNrvƽl R` 6LnzwǺxmU]TodiwObԭqڞ-Wg0J51`dɸa/ysTj<jk BwǤC[fw5m O}9ΛkTQpiZsS'KJt[^0Rs?ԯ?xMn|kWnYʮ~t!OH0J _DChy-jk DxX(4D 3M紭/7k>?$c*y"]WX&cGd~-YT!W&-жΫANK64ݵ)r8{ҍrivhev"A"']uhM S4zh!{+ÿl~1 W+ri|\A C?tC3j|Vv$RD'jAG`4&$%ImN^ yX6VNB{B]U%5Ǜ3έ.o\X͌a6r՟Ёt[W˰ FZ= Tz&ORiGY$׆|GG5nJzf5:<іIנLJ-6lw븟Saҕ=KmtKn| EڽM̝4A p fMp>4>I\lSOEDk-%xaYuҙt5ZA+ByZv Z$Wu8_E%BEmB(äiTG!7zm} M|9T33#Fa,q=_К_*5H.?"PcmChiۚWDw=[PLÙM;ܝ3b`2#`F7I̷dWGza{R/NL>m&6>eK,Mko-MZp Hq=Aenպ| DW\Mn! +{ rh# F:+ F^xC. ?zNÑ҅ 1XyZd,%bcص@[r. YfV9H58+R^6W,@ vb>z[;{Zo=3E0fт= W7ֆE~qRr>Mhƍ[W/FkذI ,ϡ&xL34j5JP [Qoz0}' y|*Wov*F)L")'>T0D~\: L֞\QKp+YA>!qb. 
4*$`O&Ac2b^N}^b$r] _>o3>{#ЫO*n|jRe$i7}&pn.GA<<*'><5ֲi:cr6@8r%0QmЂ`N4=ԧ -w0h{7[5>fo__$Ǚ};:$i?4fV8ns~,3^҅꿬R܂AR)Ѱ]ksTw}F@r?b;1GF5pc7u>fXzI(W?GD_TZc2rB5*[H߭N RV֤Cgi@_:ƸByDW/S mٜ^5铧cCV)A\qqr \ʴvӌ;xQ*肬b㴥xjBv.qOO\PJkd73͜٠ ֤ܤҗ;+z͒!ioqlnfo\Mt}d gTqF$k R0-珻Ft٦C,&o|gW!"Q#%([A:It6#/zHe91 U,eW L"3x}wDE*v}hҡ"p\TUz6orߋQGv>Sd0 `h^!Eɧsjk=yӋPmF`zK>sI Ujpl"5U}3Gn'wO8zQAgg/V/0[>dct-,h*;-iJˆ`RTΩګǂ(R~Z`Rح%\#Eݗg=ɝmsh=j(pmv6X^+mn:Vj7E`u98gwWLLݥ(`HakQ2^$u:,@{"Igכ633kwˉxФ#3=yɲW>6 XMg|r50"~0嗲쨡Cs [ EAŌ-OUvrLw $JL ]|$ #K!{(Y_eVZP dc7 &F5N BaUH6̫OT tZqWI6[7X*(JffK#b&Fv萡"vS5/ n kbWY7ג,?ZvcAdqs  $ wvL'QqQL:!96>\ͣ3^}(5gB N¼izɹŠ͏1wBѬ*XRy("8^Y܉I;}KIXTwԲ @dE:( 8ն*"Ұ8/lxq306=TH4O |?La^MLÅDڽt_m֘y51r@cI EgƟ |fL'E&ҥX~Z9)x\tiɼu<農P{q)XH8Ծ(W`A)%$r@E8pD:2]W!m/Hoi lS xP)nv?:FK#6QPM겺ۻE&^qcfaB㙛L81K۬9D?5%c)ʜzQ1_"e-z@Q[}JVI3x?!V3+$]JOeDkIk!;g8\T;zxpݷj pR8b^C԰s+uҖk4,sEi b1NkZ "|="A\AtN[CVF$?hk0q]*Zy#W2m$L|1}saK#YAI%zdfMMX*(d`//} jcjasp>>T*무vk)/7ScGGך^t\Yiˢ`EΚ!qL/b6cBc/NZ?1w'W (נLY㧫Zq-Sk 9 -ĥwHL1cTWߊM^wf&6ȓJiw]BX4o.6ycQKU{Z#"K e SJ/pku,\!ɭP>_o? bOʨ=Sh %HӚ蠰͒a '.Ƿb`O:I o\b;O'X2q%Ӛ!4E<1vwqю+ %=JdQ?𭮋|ζXPjYHA JDkwJ,I _n.O:dA fV\HcQN.ltL5`xBT,RDtkTA$@^F £ź$lIઇ!F* {V˂2-S))d#eX =r[ zZjmM֔[LWq8ÌӰ|Npсw{P>cgցEMjsͪ_i&cݏtA5BeN`*s\3Ԩ"hz[B{%ql;`X<^B&,79OX+=w\0&'sJxD$RV އ2Y0h/J_\X;Um OafBQ=콣m҉3cJg?T0[!kLHDe ~3h6U<9_vv-?9\o]q4_ KAl鯍s[S2xӦ `FĘpt6N1-FPVoPg!!Ne39CGOOet2A7`9Z0O<0ZLG YX2]]jogz2R?+P"ucisI,Dbh3y [ ^_.+ oIVodo"/z!$dAǫ3ܚܞjSzSY, [SHpP5\ū&GhƁi(L ?1w_> ^AKΝT3M!T+ЍI&t}o3 OnO<,%rƃT1j:2jJN;aL^U5CZ[@^\͝tWX.BKdtquE ,|VV߮Yu'X5= ҡuHYHrOq#}|j;V~S:WoVV_KI"x&d)!#5+k~A1,hmS`I> M5i2S#ȍ$7r=="!'}}pY1~CPK 5? 
Ez%ŝN N$9K_Tt'[EelU (RuV>N,0!Sn6|]5y]]qk v" EJh#)(wG (' D(x)y'p]F;;ݥty_a/(rb~],*=4?H)SUd C_jYRHg[zz˿@a^&!Mn- @UL>qۉʫʍ%q7[}T0Wb jZhxQE!b31z!ҽߖ&P7\H~kh"M9^FawƬTe/]k|(sϧWdZzaFw7YpS^s/!̀|$SY3BUn./SdU xsω,DZm#Vt?0٩EU&ީQރ̍Dah|T=U,U@|9 czv ^Ea'cm0ZVCogݏEA)i2y} cR^;JqSdJQ3nL{ os> #MghJ, ura2+Ƒ%7NV%2dO$٫ z䬠VF乡vUhiBU {ٲq%SQ!XqtԍΞi1(n-3`e+V&M5Sk[뮮U3yjgV(3}S~~|c0UZ@[Uf,u:w65.< EۻyIY?r;1;tvL89)2mAϫT dGul ޝ]5O▙{>]$<{(Et>j #rrncGzGgbKT$'jJ`| 2S?1&R$~Vmգ+~ֵ5DwT:A,!b熊탆*`c@{r>=K9-GCu2"bRN%C_5|)y_6:ozNlν=._\ܳr 3;R5&RW`C_dv{c@Ϣ O" C&:S7T`d8ܽ5eZx?^'., hVPD3D-6IE k$|(>9׹;IxuC_H;y:z|ܜ젷~ ƳpKyP Goú?( X; |-^' T{I-mgUwi B O_R:DB硯CV+6H̓6PIX {hHᾙ͒>o$ q$L6^BcT2 &yeWxNE9 O k̀oDJ |0U I7xn7c^ IֲhBں[ET^Ko2]HGT6mā:WUIlېf\p,2:PC;TS^ae~Q8[XfAtŕ&N<~IZ 5 Nx3a`9v}+i< '_ V*b.uU/LG`dGq2RTtiu7e FJ;Y/C\Q'&P߉`r{f%QkEleRTeF(oPC boRh[+]]NLj0<&Kq _'yKJ76 &1@UM!Т^K|5]L*1!iግx9(J?5ZA]JnBQ1@Ra,v)'lo^@.ؕ1HN R`q5Rd)3{ks Q"d8wңX2-NȚ/Q_vչHȚ?㢻ͼ n7Ͷ4o"UH;;\ͧЊ\>V瓋 7:b5r Mp톌z&QOVqp6;y&!!sj7#|m7wG C0]Jb+J{y |N?|ㄯ#̮{eϹɬK˶ ҽ Ix|0 =i% j6D~gY')m2  ;hs{̫y x `Pi@X:L}pzR' 97|~O1MLPCLjlsC]4_u4Τ֨fSԎZװyX>P ^H.9khvIJf{76c)Yt!Եj FHpF<\]b?cԻRCiyC$኱9fǗsfzHJ|, &UΌzfY}B%@-zl-Pyywyo(qӑ>z=G,AP=kCS!kA*ͿNgU^!{\\M!~DE9e=}$89 bf1a?UߎO38f)}CMaj͠ 8Ӵtݒ%gElMT$]tPX>F|iU &Z_Zp )řT/t0|0"9rٟk@51BPI_`8m>sp&xz<']O3♳'1W̚*Q"0 ,DL%2?ͨx?~BL?C:N<; fqUΓbRŒ̯?畧;w̅vl@,f, v7`\G/"ޛљAŇ`n!{yG.7@m_!/*)l/M} #H}0C5x{`_ Ѳ؅h"e\e'0)Ujb!՗sY[?/WdnON8=W}o n" h? 
{p jRZTM.Rfmn$\0o`~goGthtΎ-*|7 .*W}">,بޏtd>36WEd+eZ?Dj-6|CpH5@TiYӟLD ^Ȁ~fܩ>^ -qڒMA\:qb`?'JdVz?da-]rKsΗ%Ekҟ-fE+ ӓ Uqڏo*+ J' 78F}AҖVD28<\l; jIuѠX!1jucĢ wm[l/(IJV+w֝-^ t6Z f0 ϲ!UfE'ÀBӆrT8xnNwi$ڕ$6M >Ir8r B]IKD]ePԼVxÑa o;7m*qwm*G?>-/ m7|c,):`YjiI@jóp7E^\\X$BSط<%eK־/OђIkC-uUY;T=GS,eWc?Tہc\blQkWHuhw[%σO+ySq%g\%N^N2)LBXH~lzĵ!Bȵ.Z 8^'ZIXVC^}.Srklq8Q~ ;s{[˺$䮎br-{8K"n!˅O&q,0o/z7"̐N2"2.T4cfSS` cV[(#X/ʮp0q(K0uF,ȶbl6K#r\r4 H cHJx\&kcqrΐ>$j!uEW RlVO #WNǒrx&wE /ӭ9T -)XyfF~>`۷a\k 'gkM +YѸ6rՓOyzuR=dLy8N&@2TQQۄG`!ذ>vF'tУ&i:.Ёm*YȽ{>RVb=_s3+7@oe0'u>vEE=][|ztpָg8Y}1KJytXA=cR'4Y5@qH,m=7W \B~|ƾN/NwFݐh IW.dqŢ de؛4fc2Qvg1HE;FvGDSg΋=Q.*f؀/c>+NCFHsi&\wAY:8~eA4hReepZoz#>.HTucq'S}˾'{z*)śm[:i/F޿z˻es6 x$Q1Tط.bWС}F; +I]R}}li UH\<;jWByV3 zR7x1(Nn+1rpkr f arvo;}\tj\+_CʲIJ(rC6,ׅwxL Kk> 2J%88-V'B̺+߷t2aDwd:&Mz|qOktH+݈CR=cL,>b\Np]||6*"辣ȝհT7w[Y*OϜ'0 YZkernlab/data/ticdata.rda0000644000175100001440000057045012774400043014734 0ustar hornikusers7zXZi"6!X ])TW"nRʟy,eYlD`O{>4] TǷ{ |~bc%7C;Ȍ^碕֨WL.KGԇgxx-)o\B r1X .8{Nޒ(,ۺ9C9A7֕Q /lV#:F?vqCû,c&qnEQ~gZfq@zsr;9U"f=)V~ܔ@=sTi/WMa[Ћ$CYeܾ JC[+Ī6#kUZ[zvf1Fmf8kE8W8W\E)3/Cİa'Q|zA2?9d5%&OB*穋1;c{4xبE귲"TE>e5Pׄ@޸Uׅvxh݀[h$y2Md/‰m[y@54,]J U-eڞIe3/ c 6teؤTLý@_kF48މ,K8mF~l,Bc'ٚ'z}3~͑XF lK ~)ma/a`@&&3&Tf^`p}Ka zhڀ[#ŨR@Sܹ se>x ++E"tv ;-p7;m/L~!Jg}cC3u)5+XQ|톼37"$g/ (uivshiCzD7UߢY=@˼_B*e#%!m`FhCly]a~TagTH8 uHmȠN1"|;3LiAפ'|]| u0i2mPi5MRk}bLsL M<(ÑL;ƷjS6uO_F;i׆: GH 'T>D l!Z~`.chpHh6BBaie D0“e@`:]jj-b Qs _R p DCfgX]ښuAz:ʥXm|7qϡL6;bL ]W2 gkL+S>B&yZJ\_twz?d0GR-H9+ÀPa\`}B{/!"C{1~!zxDx6 VCӠXZ*Z8̴~w3 p}|iwߟyS9٘ ns'*R&%~oͱ!#v2!x+}̈́;cxmr9kWI?{|?V1WΉ)r~{3s4D0 iKJ\^ .Hp9C pgŴIؚ3zty[s,<2<&5Ϻ_԰lՎ} #cU3sQ*~Q= -8h 8^2 lk!嫽v=ю?H$JɎ ,347RV9U3\Me9=q݆J+RF'oz!/R;$$My[WQy, }lv|1{m܌6'd%g=*|*7*1eŽxdm ,1i<+RP%Oe#)~F=5}!T 61,+_# e Il2@@ē6ݯXmڽb#xӶ4Y oYF:LY`o@:H|A[+i}։s>ۼ y*|敻}?$o4ӠsZ3s>gpӡwf8^|d)Bڐ-x*1:irD҉*Ȕ=hӑ>OqLJ`W.;h -WXa9eHcK+8~M@~+^NX ! 
P$[DuyH9)6:Bt3vɎBU.a~b|\Q?TDswqQ- qI-dÉ];xNrz(hoI9>i&-O,3،?Ezy@GD n4Qx`='E6B"@(~JRfY.}/rE0>d+5rK :~2^:TuQ}'q[XfvZb:C)gH=Z>ro36~/6n D-=A&B_,g| 3wtl`ήTa31vM#.<-@i&{džM6Zvit>RsTJ٘yў0;}o]M/2XK)Tl2 -ERl9"3UKɡXQ  g$L:^.t38yTn_j-Tt}jON E'kp)ְ怄,ln>~*DtL*d: yœ͟)]1]^cA?-?Qm f%~.e .|ἅ|]Тw@ԑ^4ŤNO,W3f}(\ePOF׻ @eۆ=+v:[I L1gŖh}uQ.v!~%.X{)Icx?WJ.Z%Hz6N&i'ig8'v+#2R9D Aw=+.VٍR >R1~ $:( f_ML[=YK4x"ƲM#cKu$kdedQj ӯ7wzb?\ȳ(8eo]qr~ÖJfz-(/rƠ]ר%ŒaS%T]uIu&'ǭL MƖn4I"0-:?#aXGb2=I0N>^,Sΐ'cNn;Sq" }.'3vl)'Fo/8sLl2;7JHm^aү3t>ʪۥo]eH LbtK:J=Z]DγrMyTHPX:R1$b_L!_jQMN9sb}mrsJczIby*$~55wӜU<-)Ldg`6*۹@l< ^ ?<r垰brfZ%v;M~ n'GciL+[b'Q JNF50_@hׅ"[*N7ѮGKH#Ѥ!WѴ}{%ǫڵDיy4N 5̻tVUSQ h.0 rѳPp@t^B \zr>LW[(Zzkv)GZbgުQ& X>agو9'dy\[0L̃VH߻[{L`DWx pw*'g^vz*wr)jh|u},n1$pgh|`nU^T[.;#tʠkerM"؝>tO!ܱ=y%FO҇d"X(uH>8F_)(ˣ"nugA#: c}?aXZrf# u]=$"N+Q@׹?'η>7f~G{4q:G}0bmC.I0MNL/}IO|n@*=X3G2)=d RM-7Lt3[! ?j0tl゚0t͜ /1.Nb{1_tGkq$ZrҜkS<SylnL/vc,=_)Æ*J^煗L%pD5*:C87$4i7uE9<tyHx7T`֦V^J'vxBq 48ؿ(4y3fF#*EX6Tp螌᫟TVS_-muT]`r=MTEiמU\2|@nޙRM^3W-wY|p0Bڄ^7{bjš00 3F/hɧ%w׏J>.d7qQG7}|0ȾM+@W> ȢW!D?6IXHL5oH{^>AKOogt򮅸%+u"n!9k)gi{{iʛFfT16 *XV#h9 o.Ɩ7_z[ocn싣_:2f"b5.uǧzgj7!s ߄ɔ!OE@$ŧr37'Ba` JNnV:F®>5J,L+r]-cZc_FzQ&,FeD7+?!)Iȗ~R#+ $8K Սʋb35B0W@,WH97 U304-񉰏1NU3L|@=}7e0>鼠@nRR żmlBmȲ;;F<`o}9RYy&AV~$@"NBlf{,/vGڥϧ_fjl_LBm|(#j-)ΛޮLCK 5_Ji2ȦJi4(aI5b"v1Z7S_'!H+m2\Ϲ8n5E,cGC>M)v̬Ec Itz*V2]),;_$(Ϭ41;`^UQ'֨ S+KnEmG)nH#MrwǠr/ RRz (b8mDü6@7V6rU+#YkVs X~8))BDO/y56ǏR 4pk3$S _"I'}1/ vӚ٫Grn!д$ };͹:$(ߗ[9<]$ʺdXvHݼeol+[D @"\f!#V ɥ.S&8dfMa?ז`ኻ@xC[**jKMo>mϹэ|97aGO*g/e}7s@zf9g(8f0?Rjj`(x% @RU-^}-r?@ozH!WqF ѻ$tvtRvm~]` O8_rηTwKʬyo Ә>9mڈ݂r ykVl"_y_ #}HC*} 磨 EMy`[Jx 9SfGMz h $&2[@6:[1/89MX5arr<pբp`7a7>* wB)Ոi0&Y^O-U~m}#aӨ&bk~,@RIcֹx2p4BOt."mnI`Fɣ ^cɕJwVj"RY=ó :ܘTe@49;9'qa#7_+jU_Ǽ/Vqp4@XSF],ވ;NrXVy,Яus42w<4(+[I5ar#7:1t(Og?3h9 ŭhA%N5'-)C~j)$ TcZ.bɸ|*A}G~Kӌ pAd5̗ǰRk2xAξWχ%3gձT~̠>!&Ǭz@xȨ9A336#VI~Aa%8kV\M C|,l 6=l3qxY7NӰ'JcgVM/Z-1k//HgD\xf uGG>AĖ`ߵIDz^ jT1:I]xz5&Z*o?6 PH=돬4@F/ w*;SC/%h0oi$$J~F/b!=)@16(BL`rz7eˬRV'lir5Ɂ4i@Ӆ[f$?f׫L`{;'XPNRۧj ٱ[ ldTUOLJ, 5t V+RCL3L!>1f*W֪;wЊW[['2s 
!yF>$3wRTOl%+|=P<*7qX>J֛r`V"R~7$Λ'Jpa3s#G5\puԾ\gRU0=}9iPnȠgrCE1VH{' Y ; <.ȷDl0;/νv*SY&<^gpU/e)iϻGH+9?tf30Ȋht{ǶGj ?c⤃Ȏ"N`ViY J0C Zr|RE!G˼mZ1i3N^Jagi‹kN%X"H|,q2eԱF{˟>VQϲӕ`~sUTΠ-<J3tbT\PCE>'60']/^02W4-yɶL,Yudd7QNp9Zxaj-ajK Օ~;tf;D9hip\_!Y0%PF܋EL< \ytq\ p 3$92E>zߜp,C{Yy8֎̱+`:ZBƙ3U4)]Dx ξ/G籣 7_ NRTR+^ m2swFUnAѢmO*Qz#HQ0X2bJEnM":fZ#2'x_o:#҅,mj9l:c9 ߾FG3Sg (C 4f=ys%kG #Έr䘫Ȍj5rxJ_FD+Ó)ױ;tmlEWW>?GXʊy MVI:G<;{GgW5|.MI:,G87M`yVR؆Yu$EW,j @fإ&K+І5]Jt1(b)J5SXG|d/&DBban/0O;<{ ϓ#y(`jְݕ5)z-O3%̧V~,[;D+2 SS8+"TzzM1g]t_R3/HN7re@ۤO9 P{0IE#4GŔP⋧nhو EDdqA,-eԝdhVY K`F(cgqu4{!ȟ {]DwB{Zw\(A<}؜ QAF hvZoqTi_NŠFw@(gЗ )BZٴPӀ,*$:ܞvH81hb -c/2k+^'Z)^Xb 9jE~-d?cJG袵õ#yFw#)zYXHqÅ~~v*!wVd/j5E dY\p4>+*xrvqi`L?SYuͽn>^)~G 1/C5Lq ƧKa(" nz!kyw/'Χ7Ofۉ|5}oN|̡wGXd1Rv26g#K"/7ؒA _>M'ʹ֍ B},9=jLAAB;$q^>40/;*PAIo)(E~=+j@֝<#ސGR j1]n:.5׆Zqd;v$7979?yP/ݴۛJpH "KP ޢGT>4+-Xc)/P33 x5w|+.VICvzjaġA=hؤtm*Ϧ3L\fg_~6䟑I>bgW"`2梵NqơDR7W uN[ީto92Q#((V\ޒ1ȴS馞}7tǬG@<Ò,r$eP&`|^N<| 6 Jc:*c0RȲO}oa 2.oXrT(EQgl+H4יCLsČ&%sv&n)d jX("^Cg3f9F+s-[sFg%qYQQn;"$ يDe0i >\P Mͼ"9U/֗^s ӡ?tn;"V\bOˈ"#&*;XI虥gMn#f*29Et*Rwo" h7i9|aC}Ҫy#aUbu06m5(z 4ׅ _T$j0dtF+Ώepwu=vW/1B5J1J7f [vQ̛r7y%KQ|'?G\zĎչq!v"UHҡˮ_uuS#ܷ|w%q*2nsWA-o7c}Ξw/Xww+~:fLLb.a&c|Wg~_;T{@[uωn;$GR`„q4*bVQ xGuM?#XDUsM1ڂ'Tt6oo⮷U։$ w:IKE,=XA  h:bUK TN%78@8 &3%y8Hq>z[·Z4J)FzOilյQ[)OdѤS𩥏V ےnDl> h߲g!~LFbZӕfvPe-P*9~yas .MmON;~=3yf ocR<~8'|[l >T8qxs2j#ƌC?)`V>aS$HP+|fw$KFXNFވ -,?ej Eg1D* #HNsLoӉ޲+}Bs\['u8G)7Jno?T&##gtW49"NU缍QYbZya%Ib+8J3uGBdz,޺1ٴ̯QL)4De`'^mO=wzbF?|LCPQu]U;E{{(1$HɺƳ6#=1.]/¬~<#2h %/4Nu*dg^L d6οLn%1\U U>w?7WB\ mвxKn&+we'9Hgo%`"(l2[:XW~5G@fMkd07JT.KܲMI ;ZGfȃ+pu? tK O"a *Oe_k\ 5#{j#tuPL"WCФ k]vh1[\vN;}$J= Vc#&Q 7_f0znuטJ!cݸ_^}w)@OɝW+dDP1"? 2ezF)hӏ&(FӐfa|wHόad"UeڧbK:6 ^-!;b{sig8dp3/ԸD.ΰ%bf`h+X|DPP8NoY.š]$zI1į^"ܰu\("䘨n8ktd#.W࢜eG~ZwX- 8lB%KFtgYG#b|6B>}tPk#ɭf[4m8B.i"p1xtvհ,ʩnr\cN-Mtj4_-XV!2/M,R;D,<Nn~daz5Z<ֆ<;FJz6 8B.c^+<.ğRdo2GŨkj:2~μ|@ܶjPqxGStQX-PJqKmzuǼW2I< "d=pb D-1_ C'yB nvU^y|D-i/qkjz틏𿉒B)j5vav`(0c=*N''d8%WEX'3 Mqhp[[!fuTؗdVed\rtc'o. 
V(_Rb ȬZ$?Wa7ׂp(J {ԟf#݋oLW㻘!rVwuk3w_JT܍[2)/GE ÑɄ΅M C3=mFζCqDx;8?ҼY}*K[ \*m ؾNNFRbѳ(c.Vljgi)RYGDX h7@,-nGjCa⧩Q[/?N*W^f[px,iiscI J^["?b':#vM0o1v!f Ͳ93 *wOP iEF9; oJ+ >|X=) œdϔvBۘ](A-jJ7b v+9\Zi}Dn&?I'fT*)1L,H͝in.0ߺl5+^ngJv|ICZ*g9,;Kyl,*qj@ѪM=c #@8}-{ a)L9x^@am&{MH8mQ\="ߎAZed8n)j6=|i0 bv3Ua(IW(GPks :kB\k,17h aupt[A C! I:I;`6~]ow:yܡ@C!}XʀAre}h \{kkj>O'SũEK"*U؁öq롢Qu^{ѫk,׸~z; YCPl) ld_WpKJǾ78^4>,v*-Y˾&j#feD"Ge _"*hq4q2}Se~”b$ M,'{`r}׾-++ 88&C*ssef{<;3 2`9߰0q@Dpj29(ko) XWo,+vk,&{߅:r>aG%mAnx136w[gaӂ;νkɃ1z-."_PLtfc$_!H48+b*7//ƭΧo]8 0JeQq/GX{p 䩎`]Lx=QB̖Қ u j cd `G5ˢOFH*rIU[5zJIX5QW0we2lǏŧzY VR m߁^rIvoSQw'qt\wj|SDzj \͐;exo҉?"rLP`TYq3e˰qp,Rna&1X[[_jmaMaS"|v9L^T=|*T93n*!2 ݌ )\^.C;5\N9ۍ+Rg6! 0C >;9 ?gOe%eA(՘!B0z:oZ7#Y&e#&ٽ)K&az\$,"(1~7Ԣk>zQ ob:Yզ F -$a/NCtJF`FFJc28?'#aL?*;JI zz(LPg*E"kh4ۙʂE?2ҥrIdr :8xNViJr5u$Rgb+oQ9a֪!| _,4RhTMSdF3Vw<]nsOxdej|ꧥ D~)MsV!ޡhJoM$K( _AR(jRw]o ?zƔw2 \yMK"⌁͝\&Jt᫇ 9t֟> 3eqb 5j6Oߺx\5HK3ԲHE0f7WcARpkD'xzhJESV1)hܣr$%Ao;2cɓIڵ~Nf7PXcrU8gu+J\d7*'0vu.Ii$<Ħ7jW,33/e)q u` Ų $eNѤ1m1ͭ5A` }waȚ)[m. 
=fS ?yXBμ¹Rӛe{²bLӴ?;tx a5n݈vi,JZ2fO}fOtťcA[fMRu}"k3)=1oqOL>_5y;x%7P%X-gl8ݎd.42̈ hc$&t3'd޾bKŠ4*9z|<@pe.p_Q6vHݶޣ0j ΖmY9i(X 2bV2TʱwtB_rILTzr*ȍ"Ӳ|t{q\,F -$q Dy`@u7'Es}ļU?Vq!%AYR#vsaP"R@9)8_N=rH"<6mN䳜0He9yB rZe0=)$Bc8`/^e q63$nnS.5G GRp%@BL<݇Y[).vuÐ=;[-L15yo(өA mi&n8Ě#dHh$jjMCVY~84c0P:RI"mGO^ cs**F)f Yjxj \] 1 ihpixiG> UDžM`3N:{FyWYT1/x :Ypq5@ CY#Iq^3ж2O7'8րJBb` P"Ob`Z4 % Bv"[j6gJPaƺ\fjJa DbBʅ~$D#'BMЋ|HGPA`@ӯ@4߉TƔvyz`]9BbA Kcv1 s1vLh fJZH\թ q/vs GaΪK\ 3KYy8 5mOXX뮛+0\b ՋFq$dEŬ|Wڪ0RmšMǒN=6xmr}'HH_?pN]dƎɱž9(-F0O l04CM|((>̩$=!09]OQCէx%qlPi wZJYRWͷiu;bo(z>"9U$&Nx_be!bA;%b45'Vhwc.\W ~mnWG Je*`i3JBFay  pub<`6X> J\%)F$K:6UĮ9o.*xf\@c@6T-҈ǩ%6`‚tna(atmloYHv{ike?9Y5"/{Wgt*]w6a;3YSsXw_iTD;.;xbg)[Q% N[勨!a|IugvIױlaX,Q|Zu<@kY}"Q= xw=Aֻ<-1h T<=_t/’O8ε+jDTʣLej \c]2V抽Z:u`./Ѡ{QjbT҇4&c_ll`g;0ȏR#tGgo< 6kr=Ĉ P-P#dDN*V2\ CAp̔ |=RK3fG YsÄ:kip4 ?1ӶNg{|7Eyd\dg\w=QW&L̀|v33~۞o#v~!m^vM!_xHEV RdR6Q>k+(&}ƿV5B?h:& ?)g\>D 9:muEPBd1/R?7]fѐ9kkS+&߸Rᘩ΂fjMx*˪’ U֑9 |iyN{f8 B]N>žߴtzx&@4SO\kGT_{2:kť#o:M~^`B%_]FJ O\3]Gjۆhoz>?8*}vڂaJ9@eHAWZyZsЙfA5|3_P.r|7>I85nU@hQٮ@s| ]U'zboJ_L(&v,gjx2SOe=qבYJx\osIpORKjd1ygRDmD刑~~dd)$'2b.ˇJE?3ڄ", o([ᰠ0\sn6LEFSdSrۀ=?hXg;i#?hx5DNZh#RVf@q;(҃lr T(ХZ]JYcF\SeC9zãiDt6RE} kmLV_U=˨tmJcHfPcE aFW =Ҵxi9Zt,S͢6tTs xO3)u>y)7rbFSXq{imYKI|WQG@vۿRR{s+p>0io^/8LC !XoY ;G<(]Q!xRB1kt=j %$J1xdfcﴋb&z 1s|+@loWVcN4|2*}A W`ӕ- eܰlVEj5#sKäڀ(If1 tƕׂ*wL7k;ori @z;_hm⊣=yǻ8@h"Ub;Eӛ([KGx[_}pCU?;b=$MhT"RQ'%AP5clAc$yEʗk߇&=v3 `D+|\R eIi~{jt` @먨t( ˣF7OS;Ϲdf0#5N͉YpvbAgCh8n;g%yӁlymn#;)Oy}0vQ\ap(b*nn8egU+;G.A(3Enx̬b֑GIU')k[BW݈;c74ۍo}+"߿ t1;7t<9bqeyrf\6@:FZ&ilp2`|'$ǒ?:Օ8Ļ&X*x90,x^&ݵPns^>ۈh/h;>_A eӺ{{5KsUp<(<]KXIV]bÒ[7d{P)ޓ*}vMG &EJK][n3%(CKK˅?zQPbQg)b>Vѭ"= sf` Q7߅6-En{ğeGW7/XM;QT.r?-J㎈Ms&iH' 1yP]m?kOF#[cGe^(?*F_{J YDzz5)w, tt>l(HQ(L]\@R"U63`=l1`t̶OȞ`AgzL C8C'akxW3MaKPZ0_{Dc/6/Ơxwe]mrƔfL`ET dŲmtߋDVV3 a A1K_N4\0l;;4{-ş;=a5 CXLSA5a䢋 ET<5&{p1V`; r E.BHߢ Y͈a5ћLkfN~j^ م}KﳲP$pdEZ14ۭH8gXҖF>DB}]T7bcl2 枸Vcoa%\lEpXq߾m,h/FdPٯ.URl)h;02ǐ.+q8c q_xڵ@)D zT;e+2+Ye8& c4Kpg8t!WhiIwqDτˠK`ɀd6->ֽ̤+1:y5|R ݘrZ`?i #D BvDײM,j7.Xlņ(qqNM4 6cb 
'#ˏ*0R0ڭ8|Tr{%Gۨ8z%a:o3,Vp\j&}n{Os~n9Y6Fj1mHMf?@VXl86(] { RpL,]=s9^;#mWvO-%Ρ6Y6,xllnUQ 3˳wR[8%g!/BoLkE-OUiQ(clP=G@l?^|F"!#~<vag{4'&O^. g(nC7v#Rg Jr]\1] ʽO}ֶrtj7(u87$Î<3 1,W?_`Dʚ+|:X/ojAP wtbFqꣴe^z@j3PLF1 D=-R%W (}n"*VB˄,!tb(T31xqx$Y蜼oȒ]aBCJwnD!Lk "ɓ oo2*vÅޮ NZw~~+%Pc#{~ڹzK5;I@) y4Vw n|V*ݼ1f\G`m=|%L̙)Map+^@G W[R嵼#Dꡐ%{bG9}/@I<0+czۮPY=M(k2X~o/* oZfKWn/l߯6d 笪^_U$)\B-Žw xOKcS/'LPYw'A\jKv jZmKSa['Vj=hG]ͨsIANS!H9Nr8Yk:M $ ǜ;)G VOWiUD*l7Q\Ab,DrYN!TV3BZTh̛/۳:Ov/qP2zGJ+Fony@@9>'-)Fp oG.ٯnZxaz*1/%n<>xU [6rת|~Rl_D܈@py5^lNjGTt52A1Blq-ߨr,!$=e(Ot9ܳ-Wg;Ye; _sug}ٴ<;Ic5[Ci"̀-Í ^W8)LmB&J]2'aE=Պ=fQC&վMp+48y9:ZOŶ C|SH{D-j"ĦF14tFxN@됎*-{|E\iV o/,mH;*ʣ)bX |:9A2393TXLW^{X=Q Uuzkw~H>5R"uivD9jv9n\;pd'epԏR+bgk;4E':~>p u{^j4bjBa8")ǓC0b3QlO0V$<}V\rdk@ -BeHݼ?t/ifd#V(a}ߜQ2[l{6QCERM[P-(j`V &Ds'V`<ؾ`X=h{fCUKLlў/ߊ*C+V1ͺDi@Czb3=f.KtZa^D+Ջz6G%ꃯ[pWlۇD (6@ȵ&I0+?WV ԫgJ D EQv&-BI.;nqW52zVϮ#^9\`ȨTK:N1 .ҕLSRƊjDѾy/t3j8DE4ip*GW/V5Z&yJ(L9.k$o !5cM"n_͉@z Qy*.5EJK=VtyyGrMZin9r3DUC\!ɛ6ra]A5*%KZc}V+ݐXm'v|L*N ]j8#ң_Sy,l 9_Os|W.bK'Tl4Kxx2VvN\F9-`7~+J0x 8QH:nbI&ʒLœw^0̋c% $vpmuc\AHK3BRknLқ2+~v=xJ"#;4T\,(긴_ƮYɺ_'T¸P |@ @L 8'w3+jپN]tD'lcZSb-9smгYП+A ,*(, )Re$nZyi-å<);FI.ZCQU H*0)pd)E Hvm<tKȃ0'gP1y [_tsi|"n⣨]m2,b4O؞;uHhk~ְ/<7s랟@fSYn>LAj_ ebt_ch"y`u&U)ZSY`P1'k۵U nKZQFzey;?yǤrK,{ᄲl{uerg+ Ҷ UesU{jN4&IIQ;=G(+{C q>a8(lI`Xˎg~o(}@)brК5o7*r0.D%N 8+t=1׆٭?zsx=yMd Cƀr7qgȜG{U"έ꡴K8”p2ٺuJYJ\`1m~>z3pz18Ɏ yEB~ldawۗuOk!z#l3;':Pf}z51_o`g>ə8_9 NztԷ>/i΍R!y2J!]=9X z2Jͣq&zAm4fZn6+yK{GI㷅Wbwh4ޥWw;OއvA õ҅#YP/=7&hݪf7YVeW'譳2tGm|g]@^OS-D-D{ a21~e{i;/:c8_4ՆVo7I[) $Ek&\ _…)Lj}8eMKTWP7NY&:'@BG&=^ H3c㸪QbEQd႓Md& [/~]'fKʓLRJYܠ֒PN"_¶tF)_%bbZmf*4Vn#Ґŗ-{&A\fREOg?a ش^ "h+hsj&} C~q¿ᨎ_g~m]W0,,#׸3:{z)|`3\؛#(QuQXN*?>c[j./I:.q ÿ:9 S 7CFuzt/;j0 [Y'sd#Ý!Q!~٩(l_es$kko3φəgS(njeK7&eRZ6 4/\݋ b>0 L9x$onfpk>9ionu{e(C CuALʋ]tsb8~S-_g-iN8KㄦNe^;ݓx]=.j(F0vtbՆrtQu2 p\rQW?R;U ϰٰҀgnk1/g'a܀mxK]85 ̿C^c>iˢC5X<ع׾7=.XbĬ(qamXcYGÛhU_WݝA'TdF)-9@,$[>eRĵcMNF 6XnpbW\~w/~.Pm$TKӏX[1_$9u+X*ffXv`c:&S(mqT3#ڋ' V m6rF+]/2HJ"m"i QBuVZoi#{{寭3`9lE~zGD: Anѭ9&ڶfn2K/7 =QEb>"G =4$#;U&cW1N] 
~L3|gOB(RU!ɡ8Z7`(E9?\FZzy^S1x0ZRIv碰mm.#1&[yJNN@xj~RkWTܚBX qGC=Ǔ6ڷ4hWb-=vF/J*A@Ls3m\%]UU2rwzB'Vul ZUQ$k+޹昐Sofm 2%ą׷ԉjzt/[]HbF5d@!ȷ?-5Y_N,ڰe~Y-|dNݮ$ٓ֡´qΕd~qΑ ;M m;l 7UG$+2;xnh <7Woz!QcA:a\o A1^Y*%k$B^j , kwqL7i-UP?8'Y&< +itg?{&p2Ϊ}bsPjܔ쐤`MK8hZ=P&oɳ,TBRiQ=A)ïHEz0q[MCHDzVCO @xmPs9f`. ZΩO tB8&-jS< ]c:*d+bF5Eu(bZ%B!s%P|bt,.nb.mJ_)EC>?l<~2Uw@Ɣ~&PwuBM/2Pt}ddj1Ue aEc=;p)TryZ{B_xUwN4laҭT^vQCh$@HybމA+WpKäOR#9׃`\8}\3FbƷs`isQ&į}4_xF_a,% *A,p2Ze9}%o: CJi:["oء\IHN2CJċJhd 3z"r o|3򚺲_g%pӺ6 Ȅ w,8>jsGW#A{$<}|]Yt>oi%*2=Zmt CWb%t&(bšCl*_ZyY;zlMnr4=$@}8y4Ie=U qoea+^3=aIpeMg :¼V1yJc4TvsN9rx:Y ~#8 ;#jbk>4 I0~K7#K)Y'ny!BfDij=ctdL ,c_ ;KT+)bð]zܰ~Iy  Lݣ9-t""1do{靯&aM؁/Csu'R^6UNsɷ&0pHw0fE*d`<S4;*4Q՝`)_-߿ dp m2ƫf_/~0ma0lަWe'hr:}8ǩG򽆹)?. &fض.!.4-/(C1z V34j!>80̫2Z oRM(_Źk*(r#P`j_:d8:$`w36r Q{WjTP%RA~LB@*kg$_nq]\ xXvB,|hj7@T8ذЌJBvN5dhE~6LNI-oas.:BkTo`Td}ꄪ3 B`:eR2Z"cCnV{ qoOݧnȇEmy;9omeAHt &ב&܅6c4rɾ5zsAygMP = f*q; BzvQ-hX)=ZH_ZCt2~{22~WCV3ʊ'[12p$HX?0Mn(fZ2ރ]" k?UjjE8G͘Z \^sE=$W[UNZڧ뗚QKout%̃|>|gPʒBK"l?ueJp{GykKOeۉA>*̈́Ǐmޝ6og(e[bؗZ?re Jn.w[&̄V1 +ʍ?Kkh&Y$+yZ;2᳄)Lc6Ù!*wiUˮnakSͅ5vߐs8KH!;fRv9*$uRިUK5HE=;6.eOcA:{ Dx2_`sQm* ͻ~AcP!E}xȑ}4I1\CkdDLr#R{ %*ȶ.l|h}L&76\x+bzW4Y,xV9RL{70s*+<9qO^i_JO Hg#gO˕Alm-ZOr׽IEiOʞ<ſ W矤~R<êY=ڊÌ23Hݣ#wb6flx8axoVږ-Ipo>"@RRڻ+ mBⲏwm !v|8Iz4H j0KL1H>& 0p.ClNoR= 㲂-O  z9bY\\dd8c@r>ᄨ`9:? W'H`IJY¬7ڢjMjӵNM}A[pnSq7K:4 4.a&KlR»)T}h@֧*RvAF'X@]_y W'Ԏé6x%e^:[~.~6@贉Y7<;};4rRa~%wjǾ6Htab_,4d,  'hPh0ma ^Sp0-ETp@u.T9Mj2 Ѫl%<ͅZ?N)_,d9;[5XԲYӽ#S"` x{ O@)Pk>C*% ޮg-dA_ ѼաeIMhe6`Q Kբ&w4b M6o, ˖e쑒V0ZB汏wWC(ʊT6(Rup?`e[STQm)iNꏒ'kmT'-$hN|T?"RW^ĀȒV7} "0u#ImH&f|ҕJ|r*nu/Mk]Kv 4?`TK_R#? kZs C X/$|Gz! 
a jCc,G0qKXҵ?ȦA9 i~ Qfl̡_!O '6}o~;'SgdtҊ%ό22ԑ$y:x7+4G`$-6uַ_1DyN3ce ڤj9me tI ߂k#t:[%.MήyjeZCSpJze/bC("=r>y,31({@4b}$^LWhmJf+}J{Unvx6%,>MN]}>(7xF!{5=t @ kTO:xcf)#$P_!'/(ʏ!ntE}]C&=8|X [Á\% sמB{0~˷K# Y&L@E4omP_AyL[9G\Pn<M?N-V įi 1f kbk,VNv`aF˹>wtғ/?/x(6vU$+u$LJ[z:ҵUfƌJM!6iI(^+ dx=#epgt9/á~w(PD''qY{H bj Y=9͚b[?@0?SIuzڲ,!?䡊 v n~K9Rv h6q[ &j$@Ɍ5 up$&fۄ?QId ?[/GSĮ.h~1dT٪Yv&V9x ٫cz~B=V_q$SWN㣁%$,g6>oT\׳\SwTݥg癃e5ң"4 5|M&{pDRzY lTiTg "g8kv_,0tCHV4WD RM}#cR 1t熞tyV'vȖe ^gAr22"m#|HH=,+lM>\uNG`@* •! G>+4WK)/~ Z^3+vOVEq1/d̒GeFB Fr`9'fdU>u < uxp?-2' h+L&ppX%|q1ɽ]+Ч㼁&z._y߈9bOww^ɕn"{ +d-S|8xEle_;Xid,+eXE묒2=gjձ.I*v]/;x2NUxCf)ejOBXc&txB|my FCtHprzK:e ů Ӫ׈0, v}fե{ƹ,sdEIKpzn.qe:] v,ogmJ^.sJO$ I}dM-ٞz0?gkw !a@Q(kxJ 3vkP|{p@c\88P,Lՙe|M5njz0)PxYp'2Z-^ykcX{7 YczDڏmgw^ x>;y7fDG{6m׮2{jrH-<L&‡2yh@5' ;Y_Ğ|9 #Er\a_ϼ&P IF]Up{q=O: fpiHmӧm|Þ8DK!l.O$t6.v !F;l]/tgM1M̃&2F]*1U OԢspFs_*efO@IM-UR B<7(*j : c4K$\h͡A#b($ 胉PCX"*W*G%$wM ]bmLYJ h\$΀Ly1X[Kżbej:gak0/FEM7Ub4Jp>=WPnH۲dSU5k~%yCjTƛdgSfRzJTj&9G]՟+V&kj,8acFJv\Ic_DdhShcZ|ԑLP9QNV(va]MQwKЎ# 9Td<.21MIaKd^}F#zP=\*#Dy))ZȚ3# :ZJj:{Cw"&Пe?c哼ݸʖՉiwrfhdnrf\CU3Q˂\Y-MTTF llZ?kP>Ϋ!Ԋڻ6S0/iuYPЁ[6Mۚ@3 BI+D:0ivn$!#\[9f-ꀁ.yW"08=ZkSQ|3NTcN8dHG%ruX,hV &k~ Z72oFEP$nӅ fv 'cE6amr RY(;SKvy6 >>P. 
i ) 鿵jLB}j6#)psI, =.j|5D^E8@]<83 Ge7Y8Zw;zAȁmB}ߙϞh TÓT:9ElvXsy>hy+#Jꢞn !)zJWqtg ^p%KY&7&Vh(f0RݹτW1QA}x E:fo!, S=J%yFh|<HeJpޣp9lKmp9cPֆbLS'sLk6ʐ56byԹ뷍pԆ䜻{xR/$Qȵzw~H{^7qٝRn&tMj+O'pti!'9^' ֬/48 fү:Pv =p&{)TG +J4Y}{y~;Mb, MP]m5߻t%oϲpj@,l|oBƑVWڛF~ؿ Su/zGSˇG| {ߌ.`J/궣Itd36xonw/gUߠ* ZIAv1O.@$IvV7=ʅ,̏y~)p!lacVvKIkK#ʼn1'hosF+zvenοH<.Oe ڇ!PMA#蔊@;Ć ŒQҭdXzHcylH LaJeu+@j#ݬ3`~Fb9L35crlXcG򡽎,)2K<| [!qF6|X蚅Vzt6,]>}CۊD"8Yf}>ʉ* 0Xh-p|&[?$ܟɪ5|.Uz(?M0F0J)i *):,P=y+ĖuVk lΝہJSwKrOqs fcG TĚD>Ӄ_%gd.=¢p-\έM<>0f95+T^V`a' a,|Oz3`"gF-h)~ڳr8 wLjjcIBsU77{w]]ߨ0ZJM%Z XH ˲@(4OŷU>8b'U9Wf3Z2(P)R \0\eR*w7X@hwlϟa Ԋk[ntm:c(I,<8f"gшl"w?[ ĺ{㪉٫[hOz>vnyAR BͰUl7S=M!QB9):Jys>7޳q\Wc'ވNC!?~sNUśx)4(?F&yr&^|xRX +D&-aR[S5A@sEA zn>0ie\ 'ENp HO2<1:[ G-G ^ZE]Z|g;c<3]0Ԍfdl AҦI}DV>K'g);&rC=7=aj|%Ą @С;UIWnima#BOUK4>f*PEC~$ TV 6k?i7.~)At>jV!?(gigEK97w!qJ.y,!*0BeѠMR0Շi0I)}.-`OJ[R#}+ xR:6Sꗍ};< W^6)IcMGz:X]enn< 51=\eJ\JF)]`,qS[7vo%m$M&aEp/E21gഉ\h b rGf ԉ-߽&t~aKglv%6$ꑓ$T.ՅboPS|d{Aď 0>(絿7c /% ~6-/bfoiS' ksScEERHI?r23sH0ha _[)x#*w ph__-: ) Xc4&d777}k[:,6E\5ڐ`1ŷhO w~pj7~bu^([ߧOٳ<}֔@轕:f%#h33d7pr6H,ry346։x0t̔ڋ%Mk9T.7u@`aƉ^m]-9OQ {/ZZ$3`wX,zI9i'~t(^Wqedg@'W)[fhBcB'/ n07F$H\Ũ|اYڗ ҂>~ ˆ_|jWVvJ(̐[q,qصxL9$,xX7DXtCpN6z&qzet`_:PPQz$jgid\4UԻAÎ,WÙ!AHnkXlYXiϭ.%/gM_2wWʞk_kY`|*fZA_Sc1&$Qgo[oFhO׫Ad[Ud<)mZȨ޶rS{c)IݒM*.";n#yS}z d8y?ֈ*6c敊 eoTRݧWdq̿p^<:b&?:0U2BC! t$67*rpT;^m?wt{c1c/oD ]!>+ʏuP:9C-a'𤍸6Ӳ/^cQ{<|׻mٱ$9תB|*?lؗ0>G=byge $ʹT3l|x]G1ET62>e*Qi&A@hk|\b~(ץSTsQ Qb C0*dvysK&]Llb{Oj7nϓ"# ʏj`w&v v K*`tN74Y4 Xњw%̍dU"헩H\@ !UEr`n ϶˄495Hnͮ7d&1% ,\ ִI$]O`SG {&r++4u-ox{D&ͼ EbKS, Wb8#hŌ9C]Fʃx(9@ȠPnz͌ g 8phuudF/}5D>i\)P/lXMVDDŽNS4[nΤ#Y)`avjbL|~%@#lU{%s6&* Bմw &˼ wFɠO>a,/3!OCs-Bw$&5snܕ8yKc9`f%.2;c;[%F $fnj5lx)O!͊De1[ eL@G 'Hy9 iym.0xv7YK80 * ڞF ;:υ'%)مlc ,@k3[ZߎnT/%$28,aSr }>u~̄4c| % !/Ə|IkBMvJ9zq!Ng)s. ap<^c Uro dK[Wo qTeMKn@MnW|fNrN׬7-mS F(gtÃ!].IrPNz~kݤۇ)ByyQaUĀQ ItV? Ngԏ7`-\ұ/$`H3k:wgL?u¼ FQQ -xć4g[wǓU,(VgDTKD' z.` -ƷU(H_p}HJٌH1 ˙GسB$&8 3 O  ;#,RE@Hp"wQ)C$c!XRq *h{;5ch;3UNo(K@߿%`?X7uӠg,]-7 dU6:$L"Äa ؝r70 O-Ӝ:o*BzGE=K$yW:Twb7 Du! 
0Mzo4b5c;¡)GчHRʞ4b3Yߏ18{ Xe&$J{1r16B>=*igpfySĨq!n4,@J2>hegjBh3~9W+w308>u 2+PneT<N@*051,T?@ɐԦ(}ɓ"ENFB koW{-e )a7K OꚮLFH_TL$czRZTǛտ-c.(G6 )p+nӠi+$U2G -T1j<+*?' 3zPm}$|gSb/}>z^]8GF6~ 6.>/3\%n hK?҉KLy8G2g\o`XYPQ0M 7TPɴQV^*>]ڨRCs| S@gRs}s∎S\;Ovo07 iL)@G8R@9L,,wj :Z;|"Gځb{A1JkPt@+㞕I`F@)2z㳝x C思=Bka)` AhOCK ,QY vO4h(ǚ@M xf4 I*âfӴIrLM`9c߱o{ y"}G(<89JC\T`C+:G/=AJ/GMZQkU/CN_#&[ϔXdTyB5*Eba,NuSWAUyr2;]JHjފ"S& )`P[X+5QY8Q$Lk<6a)5q{YrspҤ1!\a9x]an&]_s $(H>.ݠ,kkDyJ}1 Z|~ZQS9B$ۋX S Xc͸[p~""͢]lIfg?m}[{ Ba\bYD[(W* pv;flZ] ͩ֙ ,M(sn Y/oS4Tj@~ʑW5 / znS]2x bV\kDǨ{QI?^96q̏z@ĝsL?'E^ihpy9_D$w|Ѣͣ_{䢠x/T1;2%(SpTܞAih wuBQ^2ܫE%oȧb)❉qg$ &6>af%G5=o ̐x/)C6g;aDa6jVFLA%nqOJG5o1G}r73X ^BgV>a_E%*i7ulL7hƍŦL-oxfi&$I*g+(0β5@l/;!pyh*݆ pVF`岫5O$e#@?^1(PEVdA } H{gH[渨OYn+oUݛ$ s0 T/y N.D? I}owYau'X^ǣ?>\wex%}sdGݡXʓ}pk™ r^SX=2¹ Lҕ79} n3C,Jfcj@1z;}S䷥CJMK`*sB.ɀKfrEAMhя_P~ 2H.\rv"gQ;Qqv ڗm2fJs̚Dau'‹6aְH_ڡٶ{,E+VB.!j\]Sbe-oF-Uw~ư Q1V@{Ƹ12[7(9]3?AceD X6brw P_'G>ggMn="BP3[lBAmoy$FAUc}ľp}aC/y"(#8)9HC͢Mr]f #L yłkZsjj9 VL)ќ0(ԍA?v CazɢV*p̓EP?(Fka `N1[r56 B<>c'HI9_Ի+4(7otMʮ}=iSHRڰ`BȉjB6[xAv QTJ{s0sbd!Ns 7<-٣8_Id_O˱#-hl>䈠k:}Z 4ksV 7[,VM`*[ʌ":GĻueέ\d4bDJ| onγ|Y2J%֧K,r ͫ_e|&uHQ͜,ZuEhөfCmi8EeGl;d4io4 5фL\~ķ?eQw5xݔm]hS\#r@"W_˼N|!eo \[ &h<ͳ\h:. 
KC@IX3+ݐe1Q0JJ ЂUn_]I oQia/:1:F{أlɅQsq+XHuS>=F,7rj.~XMԌ^ kLj)~"Ҁ,XTǴ\29ZZϥcZ =@0$G_gGӒQVWlK;o_Fs8 >.{n_1jX1``Urm 77ܸqVue Ƽ*xUsH19*p e/ik-[b9 WNi(}gtE60!@:Wxrq:~QˣZUU5H,\t6{ yFFOs]z{-1Ofh= w-r\LҬ2T‡7]u+ܥrpw."R*˹%K[-yB`q6oY?(󙅳\'= -X0w3j$R麱` }=EĂtbr O+хVQ.2mEqB\bҥ5լ"DL鳤8C ^K~e߇U9f`)8n؝3'Y NXSifOD䄚Znr1nEC؏bwHC2mVT-C['HOa`* \e=uN溍wh8Et}@<µ$hkN:(aG.T`fHĥ/jEE.ni4".h+}{޿-R含xH9\{;?˸s/V=[I-& # 0q4=#1dݤW|JdR;UOzi0Q]rQbNzx4jd }zB$cU:hyb>H},Wl>C 7#ic\9t Ўڐ.,U}_̒+y%JirY`Cn}-5A[0c++] -H'g8(:"wRs.e;7!b Fes?104ѴTvf-g\Tot蠠N {hC&±s.$񳱂& k^ZF*D[kN Dm4lrG<1Xb}:(u^PB C/%I|k'W.& PvuQ'Ř?Aٷ&)P4;FKsk# F+}9CKOF楴N$28$U+ӈad}}@zOL M\{D zQǑ4#}o^p27C.7פE8P{B}QGgFzv$8y1͠7l+aJث~yFhEח!?yB B'oqfUe42gڒ Vm2 Wu,uwb_C}.mԲ|i!OV#) ʂD;Qea> i!!+q?DnzK̩(ҍOu3эLo-7ڐ縮kǖ4L6*SoE T 5L5dg׊#s=_u>tɼ`9KoqU]J1lT+C6UqWi @+a~)L}BzFE,<%Z,-h i'xsrZ#ZS07[v;3dcڅpRR 4eZstjRx I:|Q: on0坘2sb0]ۧ`|sve™˦=EҴ U䋜Vu0y0,|s~Q,lDrH#tp_)ч@k y}?PʟM+lh?M;Snj~mw5.(sY S)fzKw MMPpI`U+i0Jl&ZU~&%1NU M |Ô|JƓ2, ?9h9#= Y%4cɡޞSg#HˋC; Lr2{LJ wB[ 6%8ݍ[baj<ꚸ2 !#未y]ťxuQ.;2ȪJvKOEϔ,v1m=T$8app o.%g\gvL/gsz G]bh͊F!m2OۓX#Dy8@pZ 29Y<h\z 􅄒/2˧ԫ,UEwaA%X d6[^iUo]1(RNƤ{oi[ۿM{DZIӱ4b m T{c@S@7ʷ=;綳  C=x w8chJTK;-\ Cf>&t ]\,YгƱm=^_qg_'bvBAfh @_R/RP9_DLT$RICz{= /J`O& xea568K%O~m%yŔ KRG픤589u2cV/(UxAZAI/9oV` `@l6no#?"%XMJy P{xeڼ'Z ͅO=b'aw[TvWK1VR3>%pD7{{aKXWS>o> 1^zsOE^%CvR5y9h@9/W`ph@01}ZźC{ ^I7hSY<"1jm@`׎vY,ltb%tj_gINc9 uܤ2NJY0u7GIW EBkȋIp[YvsSG"ME.<#gV-Z1yĒӐ^T;N9A $i ZDGyW\4.k-Hp a?#s̈́/w}qu5AZjqTkl|p5Z3t舔WHPLt%\69\-`vc^/l,7uȎ  +@@T'lH lkcp۬4m'y&X֮p1(&|` XH3:X )7#ywo6Ehj3:Fꗗ^N!#YPڬ>cR΃`1:o/3 %鷞7| ڒi72TD;X TJ,rGͷslpyVֿu/?h!'~/s† ǦX|eqW^ݑacUJ ӝT^:jR3dx6/fvb\Zn͹Rh?tj5ƙĔX8O\H)8;Ӵq!!c}KEO7Tګ <\d %B4!ȗ?[B/|QNhSN!$9z놓D}UbEb2,Ċ5oN9oݣ*:!5 Jd| kJjii/%,bJvCCΛBz>p(aP'^[,r6SҧQ^ M զ.SJ𨾮@dž[4+ ss,R:ţzXZII#M lR7`R}e/wrn(‡M7TyRs9;p{2+Ҙ-h&n~KQ迲Db]4ֵxdosxpEVbmȇc" ynmOY<۔"a즩ֿUq.ԦG^HfmD>}GM-sJf\fM^nQxC{71)*:+`G}/^ cȉ|)~l B_N흢|B0~&/Jēō6t 26rFIeˋCnw,GgBZ] wNtm wB*8fW<.@ZGSuUGE15 R @PrB"J2g$?Oaæx<جGfkSAxTt#p42RNg`=N hH6P>`~3&G9 r_](Ufk*ɞа$Ys\;{穒\'Xej d.31xv&#>}? 
8q`6,m*!7P(ýH2G4+K ֱ$ʲLir(Gm9τ2F*&;q3멣Ϊ}r`py-5;w.-@"ssT,;tb%$!g+Xz-Q2o|7\ ,, ?iG3@҄s(o*+l {ro{ۢ> ?8E*O[+aD.é33kAvcW:#A] ax̙c76ɡ١y]9=YV,yz d}:6'iۮ'mqV*$ToDG$t*|QwDTu4,9&.{?/ ~Yոs'T{'DH|/B7"u_83OEŏnTvnhSQ0b5Kf1ouo:eJ R͈y7-jȒJS}e/ZIƜ eEn+'>? ,@5H/կ1fdW3T˱Jue~DBtqRDj,;UD`]KV:6ZARvK1FHU_k3)EiKi<.>iɴE Ln@6ӞP:K"+]\f_V jM䲢梻b 韇g X8u"! +.΍IV*m927ꤡ3"|-*2a g?<ȉ,"9 Yؠ:¥B>LQ'13^@.aD+A{6i_m| qܖ]Ƈ=k[Fx.)+bm7rƻӵh}DqN[c,̠.MQ=\ʧRFI3pM*iT|Rɯ?օiowu (8D hB˝peЍ?вr:PA-hKuS2=xsUKSKZ.Rϳ{TMx`0^9b7 ,i0>[ǂ ,avA@Fo@j4?ռB&(oT/|>t+!,ā{IaI+}R3GF: 8tNȹ;,C93nA˻`7J:RѮ$=ZA[y@Hi ML_𖇢"&{-2դq@LȼԒa8Hf>.W9d;ϥGyTOcH=eA9z l@U~*`Ѡ:0$~N#K ˹O~Nn<<ɴDXP HR`ҪcfQlx3㏦TX-6$yY.sd#68T3XS W@*/~^e|%=ѹ%j_^bnԷ4`vu(}I;~R 8c81w X4Qfvx2S*BWǬ>(\j]R4`ϻ IY&R✀» ($EKbc1"i1zSvM9BPneEwxD5δhUQt,IIo;/y,QHä_2G*yJ,BV&- ;iԯ~&;W;}x!JڌM5.C1w8I)2fDoxx3S#IZEQpI?SҦ+֖{F012W_1}ԟ-' s pȘnq]Ṍ& G`gHoqIA-6 qkv_34ysc~p1;]2;j{9@2ON* $n!FZhy]jBua0ko唼ݒ1kH" 2+f9IbFh@d咙 8!7FW剡२ 5F=:\e@}(,*\RZO7!ԃJcBa715 Cy;}*f*=>7bӘ1:w+ گFP*WMzO$70Xv8 XS./w UCѽF4t㮃nԩp6Mӕ#%$*f/Ab)sӐ-gU*X^}sD]Ƚi>!7w\`)v:K@ (\Ox>tK{jAoA a]~sh.Ɖ\\ k}r^Rh .P_|ykOM +Wlx.? ke-B7 NͿ +)b'JklՄ/Wtԋ4`9si 𩋳[@m c1 <$$KڵAxw$ "/ K0ꆳ*&TG4M v޵@TYgw(v|J#hhb2kؿiF<6c`$ML8zI] M |'nK2z&_>)F3)d8ǸXF=:  :X :.:{Fd2O+% $S3 Jެgm9hp/Uo|P #8Jh:e-G%2v*{+4nk YQg =Ko_,-#07;2fB$!  
->՝U *!5?1^5xȜ%q1-O'S`Pڻ.M8zL{מ5TLՙ3ΑB@S';J9Q]D.VP2^n{|B @Mz ̩j96N{`,m,vy(HK@ SS ց 4G\.mWe|8P.ghƼބ\GwǣD$?, DPN wSG\d7)̧ Nأ$w!e7aԦ[ƀ87}v$JL8 0+L;^9#Ev]n]kPN;j#ʾ_RI IlC3)}3~s@3>,*&no-t5!CLPL} xk3t1#\i98= ^.JM.}uWN*";*gja3(qDC)Qukdzg{->-nfmaKPՆb /$ZYsDBYl6Vg1$.~FYf>qQ2+>sv *D' RjPz5fgGoe?(Z>I_s7ARz_c*P/pBFM֕ҙMcj^@S%P~vkj"!h-;ϙ]ȷ6zގNc[lDi;D7;Py jxlù"|ӃdRUp#5Ĺr"Ѐۢi3OfWU@U@=W˷ufï^c&:%HZ(@mSs{93m} 4q 7r\g*(`ջd-?oh%UN)ջm#WǮ} ] { u+K%@ )W_uuOvK*]pcql_Yg|sFFzԑ۝Y.䄪'O=M5AZȝpEpYڋ5dD,m[_ uW>cA/ 227\h'E1r[c`e)3so4)Iavm %x @€ybQׅ hAj to|7*\)sׂ>7 5Dn1V$I*mUwjS^Bo8@uIH$/3$]8$7ue`|&DϞLM< Y=Cœym^7ֲA5 ~[N+f"oр|=|} _{Oo\znkx.&BMgb,>)pp,{;Hѷ.*2ҳ$R9oC0_QʌPbEŶCTҾ89RS~BmdhC&'Mc;rX.SXB JShׅq̓&E+ cH O1G3A{Ô+}قTEno2_}_kIUj%_L6I;%hت6ӹ8J鵗,˰553aB_<vWEތ%q I^\`N'^1￉6aIyy4iT׃ d)1MaH9WEEN6>t=Biv%G3:-ХGj#Å8 %TW{}IǼ7 6W.< Cb!bXg>_LǺCiA}>tm4"ow߃Z|2K5iUn8U1YHmW?e=101`Cj3[ D]ڼdf1:+ctk-K.RCRء%$ofu;'Nў(?3̟5,#|.PIs6%1A *C1AzwsiNpG[Jޓ|*騕Wd/desS5a^&0 B?2A㺜39 NXzќ D S7d]|b&A]/ءjArMA/lD1k>QGźE pbSowC=CIGsUjRfP%҂qRS 37"!Ejtx}y K=mN ~ vG%Rc3و.G,+ǯd{7+K[u9 rɚϒP8;BJjb<2/4M$+p6/pTn]t`[3Jz:Wiů`AN-+ Rtdxnl *n2f6_:VG%w%V3Eݸ}MgE?msFĆFL~-Q^6 }GS/&n٠ߒITBv1W]_WTj6Xg*:mvdYs,cz5mυYx+0?v"uB"%w% ]*uRraklؑ>L;;f{lpjC:! 0p&&֛z^L(״tI&s!\[SRʴ:U}oHo\vaz0a!% MT6mES7b_^T*箞 'Y Fp+9\B,#@~AObh̃X;ɠy̐?l!P$${;!1=17-p`5M~p74F I?a3*U8W7?24 [~j@i"t#Iޝb eKkqkgD;SOx#CT~0S H~!pǍ6Bͬh pwa?PyN<j! 
G`4`>0"ؐTpeOnLoUPnۂ>pyX&)WU0V͗[Vƭp@?LiIދvrGE{{Q4dh)ӥ`&}2\OYvr 9e%D/d"ﺰ f=tTy݆|o9Yi,3AtPD: +U'oWpM.ի0C`X伤[,m}$mgQjSo8H "Zb@vR#rڹ(>*{?v 0`<'VNOSj\>=GK!7H3gy慧ۄ.d0WEbH*ŽХ%|dpYi +SqvyCyToSŠ$$yr2 9&{=^鸉0*JH(Uׅt.JA3 h:=&١@Z[ m5r6ɵ(:Fʹq3!1ĦC{ eҢakL1wWKQ-t ޢg64I3H jG>Ne[fh*whknBbQT]{Ot޳z3]淞Ai޼VX^\]i pO(Ʉ> gCdrrn蛇xZ.t ~6C"-oKw&*<LؚllCc*C,Q({d -]Š]vss7'$Џò|ѝO0BbMT~q L2[J\51̠Jځ‰|zxíTV͆~o؎!ewXa!ݦ]^8u5lx+gd!nF+՛Phf =ѷG_9B9[FS* a(J]uKxħ|m)|y񈗩msƒ\kIa poEYeRKuiX9'׆O:L6ݕzˌC7~0?(v+?1me 0]j>0ꀮ4{`[pnȰ78xO_#rLʹ\\0>Ղ5lq\c8=Z{DǓm+vY7pvʧlN4ZKK&G(jg8ɂ4j^ÓKwʪ9dэcEK?59äs`!{6x"&2slXR(Œ5uYn jas<9pۗ &')'S?"_oKSEdڈ}`Ε=B wSeY}(D)u&u|er0ZD{7'1Fލ.&ut`Uk0|mK'R׆ʴR>ĉh)޳U^_{Ύ5LB~2 2رop Od_c¡ccfI #B[dSjz2b߫ PhSPaD9FJn|8m4^Mf|l<'pcKoD.I(10vICzqqUFn2NRe~7SR:cS^e 3k6/Qagυޤ_}6@90`uTz\9@% I >lux9•f} v],M<BbZIIx)'" Ebp.U(MP^ڵ WwA^oj$=0o!|alV>OS@_>$T`xj QjrTPtr[]BG#C(yC4y=޳lQx8e]JZv-b]nL 5m~3O1[[!LϋY6[X_"y.m-ǮnDPMʰT[28 l ꇶWSa4P8"] ~Mօq ԽEf5T S@p;נj6Ҵ !j:FHESΈG }`7/3AN_ތUC*zix;Үoe~RHA"//2,-%\M( qZ~;H24]0?a D q/?q֊#VU9ypm*-B!0Wg+-GRo]@{$&W  q:G}Ϧ[%pv)u‡4f? >^Ƙ̂@UR_ȕ8~TUc#]+J[[\FE v.^|eGeZXaK|x TGFeh:P&0U6I[(.S -;\".UHE<#Qc/yüx1 ,[V#Ю;ݏo;)e+ޯWe2[Tv'G@uRn ,Pv G1 "cn7E.v_ ' lo?x$qe.C#Y(#@uUx^`JJr vJ̈6-%'jYrztFΧ96NT4O$QBwF #+2ߢ?{^O:J'O4۱MGěSCBzENH=nV?td& dF_8:R액a>y`EY22 p2|뜙uuJ67⍯ ɍۿX҂_2P we:"8O*|*Ͻ.#34_l&W2⎛[Gρf4h,^ν@>p73Gqo>[(ʠ/KX`-bKn6t:  wrqq߀㟌E[\\F3'\V'ShDS JǀDg}ˡM[g#(F 8/ZK L"zy1^2xNO.qΝ/N0i %rL '>Z2Ìֺ?Չ$"k5&TĜA.Xoܤ'p|H7a3q}SczWWWDܽ <.8:&K!Gy{7=QⅡvM^I؅l SazTﺽCߌ,Pپ~Q@݈Ц 'W2Kmo U3b?m\kGGdZ- P}UiVTԷp96uR=caNpML+n.G;l^V@zб7v+@ܷMO*+WœU(b/E RKCla $wѲ?1Fb@I6 \o_U+wN5SʷZ n0GPe3b/D&pʈ\3ѥ4}Sp +Sqϭbi7w=_iY-tRtmұbaL,. 
A&hDl S4TKW=Zha{UU3is?{ {w><8nzٯ8=~%H +ϟoS*=KOz+-y=ϝvsk1fK 45ص}Us4KH2.Wb4`m_ W[Q3#, ;xg 5[}KIO~ O'3ϞFvSGCpR d޾ow]֞ffc)2^k<Xv9-e G&D{ͥCg$'f4:ty7=wb]q8,ꧡfGo"s~ɜT;/{2Ոo LǬe2_n(tPugD@58t,k%O#uQQN5էۡyI8 7j"1 f_Im1Ԁ#bh.j3;%ܽwI(1p"C9:ڇS>Y kcU*1A{b&:j<'+utpMWI* >& gsx>!i^n| :[qZ^yh4#xGކL`VVIu \F@@ҡPPGaŦGZLPUim9lv v88av D:ׄ.DG.)(O-ow Xb?EPDB )'pɇHk\Ob>2T1%:f'\$$*'%,;HxΖ_1J0>N&߄M-U ac$D4|2 [X]&ǹƙkMXȸrd-I^,-K`4ٱ@Q=L kG+3`!5W"Uvלl2BfOԯ48ۛwWJuq \d5+˴.ݶЩ-Ye!ŪCj鷁y2&dT%$鉸[M&>*NGĢ5`L}ѓ}^,|Ru66#HXuakkPiy{4q<"{ՉЋp?MfT#F<M,fG3gDS n"F. g,6ݪ^#t4q.#C"*G6eN ,_=#KѩvD;\Oڥһbʊo&bJƉ{WEr2FW4;KCgayl@Kͯc)F&HC3nII՛yYOZkKYcLRxmqkihňfp )#2DQہnJBnM$ź)T52dlҿX@/C/U}CC1_H赑Nin5d%_k nH`poJ%.||zgo7 h9n!q#[^%Jh>i%"B/|zX9o%d}hq|Jz*Zs*T ܅C}gS4x4 '\n.+ohwtG΃[-uՍ56Df(lmsw:A_$ i 6([;iNM.9^Pfcy6$/%WTÁzm^{1:Z?VdT7 ߕŭIX A 9u,yjd{oT9&OQ1!l.K0cmaW ֺSvF_JB"%O݋c0ӅuOV sV)ѯ3v@9X:&Ŷz,EDވD^YemMs=3PkBzu#g6+fZu! r4D~5&"k}=T.-w9\udh{@F!M{SbSӴl!G"|HRޒng/ߢHwBH@*n# 0\i64F913ǽ*ܟD? V:;?V|%ʥ&Pܜd.6]/LP'^〲)EH4s1 69E(MG` 0u翚? ,/[?`z]X$u앺u]ĺ,s8^F&g`WRZ)n,zIdÍT^g6fwu+Z6|c=R>?{\q3'uP57~Uw}&F}OQdJ7EK{*$ٝQ2m-,vZ%%sO\QGq{١N +aZ:9ub^vZ]>҇ת/=y￁m?}3Ǎ/IvW2{#*EAvr$f"C;߶ObL*eM4 K jq`h砨3HH!i(4:=*qiQOy㒞ucI~y"KWP3MQƇc>):  yB*h{VI [O$'+t(K2>-2pq aj+~x9ۦ8!Cdf!gwfsǰ*طVo< pM%Ʉ16 U֓ kKfjMBk`Qg/:7z#(…Vi*W]͜`0i]Xh8挩4C/Z׿ɛa\3jثêcef9sm?EUkcIw$rpby?y`ޕK 9EzXp+}nW:,P ,B7y{23[lOFFa/G1kizvtiA&Q1j Ly9&jm0O1 Vs\cq "kl$lRKcG+G(ˏ@,媌J9H環ZF[Nr6oc`;q-eaf|E!U>68Rd;w  뇎fM_KtwB]Z ,1 Iė)2lnp!exK9hB@4y 8_)ƙ:UhIg}~T\)GeDiLُ֠O50{F(xCsT?dC?N eM+Sd:ӡ^_[KkG^IfG?&%v; m5IƓt$Q̉D .텡D S$YdBFLYS`1]p0Jo} ֋稰jdM*q=0k_y|JVZfzS$d qElg2g6'Ntc q %uP4+кx"e@ߠ]O=kRFkYE, ѫ.'/="lӧ4lb9/JY|qNQy|!zNq=xn^p볈Mpu_=W`\ 1[F^cP lW@M=h]xEeOuzƴUڍEY\;cYle #aFI%vRvsw]-f u&%`*7 RcDÏ@vTqa-Teߌ7*SZ '[;O0`l#|t9@:2m;MOg+eݼ}EJO߻ 0+.45H'mFV @^KSX)v'c.x?![müud`Hb?~ϷjN.)lNef24xuZY$ Nc\kַ]'(PIZSnl&j_Wvo2/!tiSYhIq;X O>H#Q.hnwΝz#ƘL!)c{FvW̥.VV$@:My:s>H,178TcWR@14TDwU ApXOAQ#Y j  5/}6);'ې nv+]T> vR#pa6.hJ`RG_T#? 
7Jto8^Oj]UA.B,j4&!F `*PkM>X}c٢]$SP#ʀ4*F-M}Y\P+ڈ_Tό83w>c)we'VKt kt՚~'E-7/sEkS`BX"\f&ҷ=ilPTBXpjy[kLI`KP->,ұ1 tÄJ:?cA9{%k+*g9xAzF.arV2'JHd6Q*o6HXxn,X',dv_B}0ONl?0lbwV]UBŀNӢ K"]⩛,EeNLSٔ}Œ=I7O'&b#RP#'wW7m;K@ ,.5#^eWt1 hO:|Â;\ά]n%H_ G/C7c&ITp}P~#6̆eE39 W.>:,ƀ1R΁VB7B,,V[uaˇ2FPp}{x.4=ױYy`xl&PAVj6z 8i\O1Y4So=ɠk2?Ui8fEj3z\qn ?eUoF`9S^W֠bΪcd*!\Z6l4ɛ kÍ H#Obwi 4ԞJ]00PЎ,SJ=ҿ#^<xj0꒘>겑U|_6:qO'(, Q}&o0K#*jpuv.Y3jA6 ?DCKWMh( .lk HnR_Ln}ڈ5f`>5`m8[t#r9f!^%`r*.doÃCn}'<}YUU /JzSr?{5R RO c<:;9 2Ê,?D(Qċ(O 6 k:DeFR:Z), yI.\c(iZs!,0Uy9y凁Cse;P L=sWNdO&tjbQ˛x o~xeZˡB.Ţ+{4'i״)b}{USK* vhRGFrנ`CXZz3ޞ7Gwswɖfpibgrr[F[lt^ "1P 5t ?tm_^a-C@͢4q# &8`=2Ope􀔎s' *Vu_r h=uIs;:] 1"鏒mJqc_ϥ%ŒS)syF' i$@,Mel(wp%0Ù85ݣ-F-.T)|Wv犕ն`Zt쓨(UGc+ >gy)'Y k՜O#뼏/+fv,H=]czd'r ˪ǕՒ-IZѸ"GM(M[a'_e. bf&V*r$av;F>2(Ѓ=Ra5j aP<i8:iff._Z3ĹUc-Ј[43&vns/Đ,3"Z 94&{X iS2'>Q? Vyk~\'E>È+$(Eyz՟mvK\sY@H5kW~;eB$6儤np+-YfŰщ};(yѠ,ҡ}FDx*j<BDم\Mg>]X/KG]xo4rC9p&]J?p98p &i96 "Y{D qH?v7r\<Ow&dW"+XUp꣪a:\?@O`|=$#fۼgzTh*P [ [(=0q!E4BRafδIJC;ˇwB>">+&i^>-n/W˦$?fU o] p7xLZ3@ +;0s D(cFϸg>*z}lޛ[v C>l v nKF YK*cjt9sN ˳2(j#)^IL8Ҕ  US[B\^B?cmK7LPo#J3DQ)c\!m:]BDs N^Zjvy[׏?߼q~_q57Uk^l@>9u?k9$jQì>J&[3C򠢞/ȴrHv־`: 356"@SɼGL @CՑ0qN E0(J#j=W}ᴸQ:e^^=Adޖ#u.Iu2pf<5ч3.}z'f)wwd0sQUFеOn8xF86 SS(RCD c5-y,*j\q[E9~;r}xmԬ2JnnYy|z2ᚪ;1^/9/Kz# ߲wQ.nfi=W<𝵣,8+=A/@]E,Q3ȑB+֭HyVmj?] h|;W*fcѩ}n8@:akŀ߻"wRkS `'3x7l!JJ jۏv-eN!w}m"/\qP%e=ўڽL3Mw@CSB#G͠O SoNN^| ;]K7p%Bjr? B1rdV1[9ĽZYW+up:l& o6^y/d!~愹!^}Eݮ4M(トwS0BH[ǫ 50)WIx*%QAM©3\|= pFEA@o"24! u` \/R+pP{81M4xB oobUe@NcJqaKaJweg0@A2)-x ?y830DZ20:ߠڛXnZ#6vAF!6krլ}:zY i%CHیڥ*4,ۄ=rʧdv"DNڲ9$(I@j{ߠ!nѡ3[0 ɦ':,74g>6&QVWʮ!^]٦% =?4BF6'y͚-\L;KV(`8˪:)d͌./ SIa9QAӂ,&p+Z`3ΞXte6Uo3J:.(`ⒾJYԺܐAOqgQ`bV(Ø*#0艗R}GnL))4:(%,:Gu?-SV~Rmu-0jlhzOTp$KbLb'=Pez8|D$P$kR׎s_oxT(+L ~JDN6.QvǮtgOJRwx!dl6t@wkQi w6 .c/Ŷl4e>ǰ\m. 
qd\*b?7F͘o4uJlJᑅZ)}=%4E})ԪUDgX<$Zz(_I}HZHu:~xf)ܠ<'K hR>YRhtdW#H+6}sr%6Z+geuV x@]uBf{;w4&+dNyƯȨzKW\ (pup- f]#|g ?O xmܺ875uq8;Kr/>4Xz4;=̈́R:$ğW1O=O/29ݎ]?i?9 8+,vSnv gOݫ4=#w| j@&DQH h6SL5I G)ًL/\jB7o'JHD=Zur&fJA[ 0E1);,K f݃܀Z!^X[2u$P𞯓]69NУG4eAgCK6퀩AOHm䙭w+v"|ֽ7*ہRP= [tI9V$~ vs*MTyxK&ްH0S~c0lVOX/\6ɕbmޜZ%ƨr}wTSti'yܔ |QmgfTðg߲b!-h8LuZꮃ#WHm$WgnZX;t# v -fP>5eM64tG:+ܦ_qƩ6/r[Tn_m>+h*,)XjZrthq"\us,2sARQ I>"[!vX;[kµ $JvOB _ ctN lX FL%z d}y&q' ӄQST X8 +oĕL%WFD8WG dl@H_(Q3ajRR{+ 5w8W(nL^%oJ-O| 8 i!X=yOn.s>srEJ_4qJ̳mn$d4"0IBC [HpnI(O%PϬXfӍiD׷ K*^ m};ɳI"-{=pe38^0eD/~^rdB))`MCJʁ -ˏD[dծwX*ýmc.̏{H4\I?Bץ.2y˵Z~{m.\]i마y*Qs)WB*ύg b)jH{: 6hwf } ̸'Za ޔ|<=tx.VY^ivHvlGYi\LocT@$sb73<@ ]W,= ]m# 3d {=YuCuUΙE 8//II8Q ́GC;S(^.3{ddNU/;U*Ȟy=(k9Ƥb\!0n="ch̎̾^':UGz"'o(QW'ּUքam+n*EgO@VwFi_-?87~).ev;0!@n[:E E9!UۮϖǼӃ 3 HoaB-{ by,DOp0LF*  X.dx'4 Q*IäfUM§l+?Jk-bsMY z\]_k-dˁfbg3T[~񙟪:?ӑLsNPk/L%hjN F\% ^rI} ӥ;Ϧ)Fp/ rl\tj. 'uB;l`Y IdK/TLKT׬C+) fNmBF?![% $BH3RA&vXѤK0CZLa?zfůYB4O"ՙs?,d.LUdH9ئW# @YӦYcAtwO`eS"uvTtVGe4N>7BZP^~]mA'PQ3B')W~glkYz),0`z!r\[z1U%F7MNh cS ֈ*TVHBc21OWDY*]xagA=o Cbe-M")G=h" cpd{5߮@ACxZՑ[Ϙ5aX- K@}z(`a5.\{WwX[W\j=[ oRPr6X&F%}„kd!F<}ޏ /i~MO.sɱdl@8rV7\(^p"`ˁqf8|y{S;ܟ;Iπ*RYiY3:> K]yt0DGE7ԋߴ{2M޷.J ewXmrg CjOh&n/ʤ`"0féz+Ό{",ejM^AvtB㗛k5+eKI~2l4vMDp$`x0㘣jlTUX,-U<*qw]`O6 FB0g v^gǣ&Ӟm <3c|.1 zIͷ:tGg>!EP\` {1t5k]z1 g/]#Wmbj,JYYBv:0`_Y[rHP4f cUڢyȡ,1?Y^sґ&bUXS# ZS6 'zٷ!fa? 
OhHHi & :ˇ70w;hW^B>ITѾG>+ ~1x72:(M3/'uKcs4jJq2"*5꿡GhDrzlDݏt"2>hjQ0=ReBw]S~- :Rш-Oa"47 UMk*,nZ2x"H?()|iue6pr$%ŕdrh D{Q.HogO|/2T'Qv23q ~qI94+ws?}*گip琁/uFeHG#|kRE;лيyLKmu-𷨔&0Wt0%JGNiD&}ta䰋O O_CU =&1}9FϼHQfxrʺ|v]2'";zoyݸ:u#V$tXs 㜉y8%m e͢yf9J+qIC]j&׃ty/M4OpL!raٲ[^w^H4-Agն6Osrb~ ʗnr^N%6b|uETݧooo#!uj ]-+ C0X;FM{p+LدAK_齃)!ORd@[q$\i>D,AvroxfMM!$︘;"!AnM)(]J"z'k]&6m ]tWp߅j1‹P)r:@!h-V^ ٤LSeLJ]\i?OhB:eQF!C:dIhO#ےK}pt6H4bfG=@FxF8W1e(\F5;J =!;X+FqcqC,м&zc5 q$7kJt޵y8xw@oFh/jP'ont=mzki'FKٝ`kC̗( nCZ  3TIDBx}l3ߚUs9Cz]ق3ywnTgStBc FRzZԹa@`N"}gRÃ,-pF@?Zs(ī\|BfF%⺱óGJ@BEb,Պ F<]1x!* =: q>=9-2_@N5@GoR?7#Ku4sA3-9 j&0v^S%k H}sCO//]]?zӫ{[v` ,6=7%S9G IN).(QytǧV'>cK #}xҹēJGJ.zd-9# ӃT<{]@X~{R7P8O3KP*oڪ PuW^K&:7(g)Mߝ drseq4H />g9|fFdiLyoÞƁ ݅n#RstT"ڤllBO`p=! [8bKwͧ˧=|Ak孜eOu0Tƨ p_>e؏t0A}yC :F;t֚/B)\GLafXx-ȱuյL'_é̙-2!ش;κtM[&Eco&X% wa_ױǢ(SEr:O57~wm:q‰K>Hr(?lȤL* pngTeծ^i}] Hk(K9cD:VXE'6K@N $z_9 e;/3eI{12:aizi5[L"5GɅp؆ۥ.x)LhYQgldH3v{?q;YYwj]*~F}_ vW\P l vbEDDNKnWpCi(I58jDҝr0*`\wAgEwinY?p` Kc3~ר-l̙gV$i] 3XSV5^nHK_r]Bo7F/?FXGiu4|L|Ag*a#^@k&9FRu2@-!6O0uWn;]8k&:@@d#;;G98M(X^D&#߃IY>M[ L֊͹|Z˖؜hO模wS#W:nlP_JL:v$5 P^|兊tN}.fB‚lLx2{fo$Y <8!;2W/PKxN.ieNű3hO1rhA+}&R^gms61_ZY5f>}P?УF!u*[3b ZC :XF:漎/& *f,= ͑z 5Fw_uWxkZVF+y> CW=dfb[҅5iW̓LFg5I \~;/bpV~f<}AM#r v,ytZ. ϕwIZƁv8NGvℲ6(pm uc"qcB^ff mby:'ONncw8Be[Λ?wk3M`I;`X!i%`DaFaY*Yj5HV&a܎+;EZa2E6,5+މyHl,cFZ0)jZ`=N+ ksRAlNc4]!%gք/q`32LM O&Fljq w PăX͈=X(ч7ڇDtt߽y# F#D8nTE'p9wJIz}bo 6T"QӤBpсֺ$bE;hYU=ib]> Ȳz8{4T(U_}C 34ZiWw}>6S3V$G_Uaf=鐃\8){k$h&oaoc7N{tPj_R]Dݛ>'$3Lp,ʧˮV0˨_2?)U:)Z5JEׅQ,-Y5PB*M#΢p:یمGһ+BF 'yz,9 PO2+oת%aoAmeim,)ez\ AS]O*AQtٯ1=+us?x$7;}*2pPa@e&h,4O5^8HȓF/-m{"E3\?MDOXfm]7~fp!0b,YfځUiJD%Ԡs)xNft W%< ~1˕zm/sQJ!llȼܹm =k9䚴_H/[}ZG0g4ea: d/=dà-~c& 40Ro vu KP } + Dr[sxSŠBņn~hfH9 F7R>F~CY<$yS|Ҳ[qxeޓXpࡈw6_Hj4ɡ ecwq;a_=.dGAL+}zBt`Ugwק^)1CJ*^K Ŗm`!t`扶! w. 
ӦxȄq311Co^s4鞜UOA4 P9ɇn >xa .b3a4 GӌWV/;*z":)?NC\B|%R@@yEP.S]e\A5$r-$cjr)T?@ls,̇ A%0&diȸ*/g{tRXRuHxwOL, Y=~\[CiEV_o۩>V|&~)k3S~ɑj?z%]0 ި#@CʧWWWTFxv /B3Y&dCCu WNN;nOTRciDnI 0p2CDYXPvmv<fYi)-z pfR&"c%㊂EA\~Od<.⫣ hZ|`I0lj1'>o9Z;;)lz#6l svI/}Y^+te_[,tiÚtdAap'̶V$n}fcBNtk3wDt1t TQM@˕\okZr9A\S{8h*k|b+>q:>>@p&0_$V׶G$M!_ ^?΍S[L or*mZT]P T*~#]vb&E$=>D:x#_I<~EB^ZLע84Oq5ǞE'P>Q-,!b]A~(F1`~EEƵU39 A|+w("c{LR/މ8y@+dd[S_9=U^Bgd{K4AQ۴`=lp!~,.xхvxꃁe!B-k^z"C {>*@A^ @+m7y1ȣ<-^l/ѯt}JsR s#o[ܕe=G*v41\JVvTDtB19hmsH'+?6zds)T'6C~8ŵ6K#J>@ÿ^8;uRbEbr;kc` dBYw6H' nKy$*I|IWIe'T餱Mh5E42rݰ6NC춝Z ᥭɕֵ :1Lt,ґK'́!ϕ$>7l%1Qܦ} Y9Te{fG^$P)4Qx0CQ5$ ^0c^UO>μ qDيpm]qJ QxrNOuD]"mϑʩ^5!vcXG*II\ڄVIR 5CrDѧXKЯzʿ| m$P(YFG>(X:e=)ƻԲ9dJ"A\(sXeޖ,\8.BZ@E|뗾u֥I|ʍz_g%hB\' qj45aކg/chx d6#e^"9s i ħ Ikm݄2p(P ylj3r4*H3l,D44$+~ $mՓ//'#?X *\+JK z싨$==W53ש^gڞy 櫭v{ќ+۽4=UV77#sHl!Av.M l6A!#[H)̀=S5*qQ!Xny5[d2Nrfc2ȍCصR$Uh48tѧ,'7Ժ 5čyu$Y}[sZ# N[=9j,t%H^Xs 6qT./#w \i^}̜wfp!!2O:\f9{RTnѮgwEkkb??ؿDğthkfqڸcl&eW<hJX:={-2I]Ρ=v%39 @\(E("uD(Z$E^K]Hpކß0 8-_諘b_߁ǑfއKaCdVT>uLrܠޥQ#f6H{'ә_XJ!FW(BWk ogvv~%4!| &\ F3P1'9-IޡFxݕC)(^%qqbo6BR3;e'Y3ebֱג73 ʋ}NhzbRe8]Ér tE*M92;Z]o v;c[S2BYSV *|f1=NX#BimNӾ܈k "1`NSY%̘gCx 7.MFn[%>nf%#BP[#`,-# -gF-X.(]Ju0+KlƢy?9C&`]\Rwh;LbcCA&LZ9t61#hNsm@ePl%>:~;&vMM),,7*IJ̐?Ȕ }JH `'nD,m MQݳǧEpEF2PX=`#LX 9$pF`buqIqCҏ4ɐ}firfBdQ2N}\.l9ԑ%6qMIhJE2b->e1%ݭtΥ}㐉MM@ 4$q 55@3@vrɌy Q(}DGAv`\CM1)z=ZPoj]2hKq8R9M-s8tfMm?Bj4ul 8{&mԢ<"f5|> 9fjBO9k7P}R8'2|#W#J=`ܡaD\i=W}Q쇍S>?>kܮ;5y^k-XJf-x9j|UVsu@nZ'bqfH)B\}`dL㴎TV(LjD+`WQWm=VhИ{(#~y`z)u}Pw0Ty>}s+NS] <&7f`L`m센hՕ38zC_ю_LD]&2w9 w$+@m+VDGh"r{oLJ-;oKety\Š\%Q % rc~KR2Ď!rU< 96R\&}?*p8Bl\GT+ɺcAǾQղ%akybz\TS4^2=$ڍt0/ΈT~"&mEޭnS1[_ Bp2ɢp!Be*0"Fjj b0WJs y-|voRɉ)*2UJh KE܆w?oFxҜd$ }z`:i׃;MEZovKv&3gG4 .e]]VȜK,wSo1w$l-ޕkh]t"br twKQ[]AI''>-/F8̓.XHS%ӗ.UᖠS<$^L2xqA^SB%yW9AHMhҰ:fq`VaiR<[l&nM̻#;^QJPרwF;CtBN |a{G@td$XJ *EM#boŹ x}go٥;Zgt Jls  jPZ-鈵5YGI w*d7r@ 4+ љ9}Yv=՗[.bQF_8SSxS>h枍%ӷ#39y]1`?,6]fK|0jS YIaBa(i|L->އ+P[/_ld\&׎Q| PÔF5S"Oby)wBHr_Zj:zi-cbu{,*B-;JР D:7*$3t*GƯbFYv, ~o8Tx |6AJI#@;?y2!+q;d{\ #G|풋T *99 "$9 Xvg*.;(CWab@5VA 
k%5$n @CoL}-{5Maꏖ}XZ%<~a.߮\:ީlCL[0uC>_"qޒb5nu'ݰ3;FE̼Ԁ #R&#=L2{VL'F1Qhx*9@p*MLRaO\T/4 ">hӞ.F"C6sއ/(D!+:QOx쵮7g#\ZYhj/RQ6[|\!D"9?V?n`>rb0s8i, ٢{szYs -)݂4{4(H؝9V6c:q:uKKlv>թqq^Y"p:މG!͚<{TG?*V -T8ӟ$ʫ pC f|uQv}9 >n6~j7m=u/ޯã Txz$ʾCGHSQP!oXeGahe.!lYA!*Ļ|^f^;1Dц%>I.Td,-p]Y;r% j`@{tYʮ_;#wK|j%l-񻵚81%xy34y<4?((&1ҙzo1s0-vS ˂$5N%;x 8`B߹kpO D>Xǚ rA&.2+\3 |=v PP8jJ/5tp ⱒXQ:hAEc6DS?b۫ *.\o鶏LcjDwr~QU8gH"-/S@/6h+ *R~Άi>7ԋ0nuPySE[3YX%,&O:oQ @&uL0?e jãI@~+VM3nIGLJ)},{ O+q! WoE,]م&5BD.YXXLEoo~z2YN|2'Yn%U# %h84>}ؓxSj;@Bړ. *L{}A]ɂ;,wIGqv{z|)A0D`?RT*L"q[(EwKVu5B(%-}ʵEƯnBU!{>ͦ,ꤐέLLʞx';wFu2jipu&3\vm(]%6zڪp;?whi HzĕMr_e˚I.u7cANu;<atiϒ#ܫL)hh h҇'Z}̉ HlE5kӠ##7ʾ9| ^a;Sk= 6[mA$= RӲaUa-y-F%}|rtAb0EK1j| ]\UZ3ʜԏ3Q6s-EN*K |wۂ=!]עѭC:77CmQczi|A㌣e35ʬ5C@4L (`ĉD3.२Jbt(v.zY۽D_ 1}f_a@nxvpMϾ k:xMЁ79&+,Μy=L){P j{O7&JFC_V*Ja"%!.E$=zy-ѯ"zc4=iZ~qf%("!l/li%!i(ϯ]AT؊u5i!ثq>#V3럑L"%B+Mu@bp$!(w9`/V2!3p,M#|KBply Ƨj%7 #,^Ϡe@/SP[9|{br+,R$QpA_aVXn Lf9Z7!ΦTڎZ`n-9UNЂa)oϕSE,n}p=+$B>Mh^:l9`='})o'| Rۖq&b2EVDWҚp& z WTYW iZ2%I*ZBͿN(\/xڻޯƷ c}mh.T%3'z Ӄ k Y²j^)S;GѦ< DBG%7LۢpwK\vPVer!3ԻTg)K`3\_6 co{G3@S>.Zeoݚ-ͺ^v=w ~`??)u!H,%B)xu v"#$"S5(Ӻ1Gz =:^f}E`%'tJɇ, D-FHI"{s''Q;}^fb?A\o Q?t[=@?) ebrE ~vndSIrļ)į^L5iP+ct\vcއf]-2; G*g`F [X"ñrYƍ=xrQ oULԞ>3:˞H9 e=.Q0jV$γaAr9S«=0YNUEOe#8{>TFy¿`He*2׿)>skbXVA-/UEE4-ѷgvr\_H,$wR K3hjԉI[z7!i'dou݆VpU+nVuUt< |Cz(NW(ղtr./'fr# 2oIY-*|amط7 " K̽L @`NEiһ5?oa?=] qqKrI@̱ܞDWv<%/{{I[`4L ?{A-5FOѻfFbT9A߂iɲ S|p69Bє4|HfMO33zfoԭjTK*3VxXz1. 
?UBtaB޾2]JG6袺?LJӞ[Zh2b/(=s)_1o2J  cØXJ<*ZATSӠ.@+Qaw;l=_k 9C0A< 41R'=zmia4MeT 4 ڭ$݀.0go%1Fwz!}aEV^;/!nA e4F+ӑ_WN΃4e1?AS?_xIhLiǐq_-b u+ X2& )Ly~M I%hmX9r3 :8A ۰Wy2غ~R'.W\*$siLH¥zѱ`vh7ou : ҐoaDx$ &C@*)| %jmIAJEP5RhF[WW\9\A~(]%fqMnP<j}`JHfԃһ£+p0JtC \2mFݒp c&L(_v%@4@cn8ʟ9:xy֮L äY2aXOR 5)qe]|:%ֈksóނbB#YQ 5 jyh/ I] Χ51@3bl U}]2AF'%gJd..F~ vܦBfW2I}Ţɴ>"On٧ %mď qCb@UNgfDf $)#PHD1{2Шs9"evZ)VfeyW˚A$_h5:JDݏuqrWRG-󄚰VVK.hYAGq\~ {3_r_J Z갰X۴oE'!WpwZA`.W?m@&7y a#mbV-|).hBxb"&|w4w7>M9MYX?EUԶȫfk̈́ 1j`YVGtR r( ,X2crw\HZrAH6+S'-6PjӞ5P`I㛿ՈGƃnA()6X@?Z13o X!hqDd_׍˟BO럻*,^p_V-,lX(,F'NaL5^BM!q2MoB ;F~[H69x'<#5E_UqL *ðAN\DѪWxLp!)iatرW" ` wy  l(_"F, j;CJԸMuwH M[ѭmDL6-\k@+J%ؗ\w+JaB!bw5]8 ,o!V¤`>czW8yii+D)ݳnRTMTp\)p3^6كpCqi}}p%zpIs8^p,j;+3]Ħwbce`ܷyH7҇jyb(:@sV\6^[XnuF&SKۤhDJxZ.Qĩ8 Ԁ7E8HCAw!UAmu|]rz Gc5 j:/9^.:_$)C_Dϓ@' V/0|4LY%w+ôǹ3Z{:+kcƅMZEC>.^}o.^~Q0c`!91Έ&_Ӓl~`z_& !W [B!#w6'>mF49:+kʻڟ"{SwtΣ_|×~GO7[[φ,ɨ`DwEZsad:&*K/O"f+Ly)(/Ocn񱐲:ogb pj[lYzU']t0؄2S#`N$fI8LKF9FBg?kywsbZ$lz ^Gg2)X]rS,fZt'j[X`#l;-B X(D:lZa3=,Z kD(|:Kz_q%C9K3&jٜ|9.!*5gTcU K$v#۲ jmuY`dΰ6נϔnJ~="CSrt׿ƙrVWl7^KMEE!DV "S8!7#x2U9h?jg&0_(2 S޵}fQXٲ=vk7ntj㻔7c@>?Iwҥw?+H OyÞLlEZv 9{/9PMg+LZZ#b7vYB B@yuY`_C]aHZEY| U`Tɹ}h^' z+&N4':LeB<@:FK@UЈɈQ1NbG@xҏΖ ͂I 4,^k{t mX$+ȔZ99뛋;݂ox׻}ռ_~u BF/ Av!Ra ydZ,-!^às&lzާl$d¸Fٖ<"$Ü9`_%sXoHz"{S)_s3}A̤ͣMGs2J.qhӍJ :͂cpUɻ.ͪo_ndݖll;{vpW) i= trW㱁wALOq-u~Ce.άWC".v)R֎JDe)t۝bEsg/:FhU/á%錘v/C8jA".  wsBp rhח6PF7y)1z?y`~:x Tdր?bЩpsps*+O]*|0.̭42=A[sՠD=# E6ɮI.Fb2䭞6&SkU  9S6Ic˫rElӾIu3_3:abWe%=S4czu*N3Q|ʈU.+n )nRe(Owy( nNl6r)a)̴.k(U$мꩿC4q"\M sn W0DMtD dh:W`Z<ܱ DqL@ǜ<$J` q=b@Cz5)(f$]?B1PU!T I Vfq wkMS:,J5)hXu@Uup FlC 0b]¿XH`6NS[31N/\Z䳾?Z2V3Gߪ#`Dg7pkziXK Ru%Y@1k-TZf=8AE:*6O$_jwq0 j45G4r&k`$:Ɓ5ѽsXIBWc$\呾79ocS mYMT1yrw hГB8 /6=q6+)vRp{:2YƤǤ}zoIa34p=5/M+EX]Mד]Aq 8.$Y%өd8Sڹw\hvW44NLH8֏(If%(~[hl!d@qތ&aYߛ>9-\ (U=aFዹJNA3L@lE-M`5# UCB, 1pviZuKC$q]6LWE!PBЖ=s81xZ:U"&C%\dDFрek6n`iwӕ U쐔K ư.(P\p^mEΑhQX{ƿS Eq\QOϾdF9ūFu VV'~ =!soL|m@' R)&uM $x2M,b)7ֽ:I<`f٠,Q oK "he;AǴjZJɚ4~粐tAr1 fN/X`[k<ڒ7/)JGtMuE`bI:Fb7+ StqxZ,Mx/oD. 
77xڳ~AL)=7즘) GPG/|5\T7,meoÍ% D w(Xz;ܵ ckV~gZ(cY/#/ )8j6a+@_}:ʪQgDdj&a(46ŹneQblNjD[*8{$_ G~ʥ9n ڇzc",<ō1Ć D36(oGP("WLI|/a0k OO`:('q 2!A+`Me -1 cHY(8;+jc~q Y>F8vZQlFRzvy{`CP)r@Og]2\ӕ9eG8~gF1&ڐ}4sj+\|⍾r 1]K ed[hÆU)1MGՖwB_$`˩Ue$FdjHY$(iVM&5n޴K+HSXnz^v-h"~Q% [~Rvƣdf-+]ц<|T+e.ɒI/It't]$m .I/ه6(]WҐ\6j Sn,癙lØKs([ʯwޑxr+5-SN,oy1D~74J'JQ= g+:oKx VXȉ_YLܦWIײV( Jl%#qYj]J_.0j.cSlPQ\՗& m idhfvzFjy\.]Ą;(R$_ר؄ @>SѳwDY&O5zG못k'D6[r䋷!x21#uRz0 dn'_oif% CԀ"SŘsu(vnj:x_\2G"PoG,r p~/t;%D[FDv%̈́_P5h$  (7 w5;`.߾R@-bWFG{V:F1*в#!B4.Uh k[n 3|*Cu|,py&,c7^-ycѥUIY׌b*Ӻfs! StCt!I3g6WoDr>gnbciSjkPP Uc` W6"qq_1%('$Eؘp *qfuQ1gP]SorX'pFc@*YB>٫CN-ȵQ+M6@|vvbH=B8 D^ZReTE(1H^>~Ȳ2Z: )P QD UO%B4#ҌL ună)^.x&dQSZ;>-SDwø2xV@Ct -?q_6Dlw­oF;l іk`iHcTwD̸:}2P4bƭZ |q^l\wठń P(c|}ENu p;]׏#)7p8]!8pM*:pˏH'q }{SPr!tQwy 61q>Q˨Kzʛuq;poϬ,$ fDٙ/y3ogmc3[SsY$}sY#:>>Ns3u0M"rT kD;pG$+4 YVm\ss}M߿$h |*: bYW`} H/J#ƥ"ű`ؙ ˸($PpJНB=P(W罱.mf}p;q~}Xɭ5*]d,d~Ao:H*|  FE_2~|27 Bt ml̸c02DdeLT=*=YiaA-Q8Y‰AtnYeK; ]w?qB4AB̖>)9/B%+-i+׊dW3&q)T!ݲrV^G ;'Ѳ@AXҒ~o؜-wՙ;Mjɉi{ Kx@ |w#膗3'Ȫ¦o$k]Ŝt71?"L.{-z -_>LtNfKw)ЈsMv/$kɮ23VS ^/{B៪zR;?؁¹ (᭖%r"uEL* B_`5^|\PSjV‚UR;W!⿸`hkAF<ש1;b? lC5|lTW јq/YNd\ dՀo:o((hX?lH >pq-x'ڌI̶4u`9 qu1'u?VA驧"q7Њ&O^[(>$b^/$U(1Y[^uF7Ld&E%6RKȤ7Q-PD9Yu'5XeQ qPA:umxgU LM] &C |.9ļAl[ediH~Mn:r!pQQWC${ ҖSKg>m6fYZ*'s=V;F6dag2tP>Ốy_6'.˟ćT @3i}J Rؠ- L qxxFҚs_ʁ{4e)&-V6`釐{~SXz EXHx 9y$v)_"K4v2߄kZ5DX hޒjL ]uH &ٽy a ؏O(ҟ|GU3.L@{ kD4F.FJt#c(Έjt9RkUt, v+tnwؾ$ jK{?"Dԋ:t:[(XH<)_:*{-?\2 *yZ =Xu0f{5v~*Y N6#ik7avv{U?1/bSj%kѦc  ytֱ)ey=`~C.3%R`24ňITh hDs3$ B쁟8 a pL=J#}.+ƣpM.KA(߁|'J3O`B4 >apuz z(!*t4(G2DӸPӕ;G&t5"Lɫ ;`5'&*CƑ:k(ȈB[k irh#7A83:g+f`2G]'ηՇ(A>ܠkjQ)JoFKqMd(Ѳxk`G]r9뗱)͛LUIX":3 ,'7M}<;ECBoVbu& xrqbՃ4E7dCc<_F %lS]k +jsOghy@O1*q4<C%? JV.Fv暧_p"KOeDVgKh};֩a8"@(jQ1LzL~)dv~U ^ʯծ5B D- 1q`? 
q(i6C?x.Ion/G`W.ri,`xgr8 6rE4$ ߀GgI'[{K; ~G}pдP^x-(JPQ7q0,W{~ZO3}cDl%O!b1XPP@/P!SinDD0DHID-沱gm~-6`Hw+ =\^pr8n4LX*<7.(2Fɗ]-6&V'.bRʬmê2µcs{^ag>ͼɅnU"d,crYtjZ_&x͔70ee0@U34J!e#Id*b\&wƙ3GQo6]Ǹ)pj?dHAUU_ihyH> k_Hq 滪NSQ"·b"T ĉw Q_X{Ŵ|QH^n~`a !T3WBV4Sr R0r3*ʉĺ'mRX떉Y Kw'%y[_bA+emߺ *0OkJ?8ɶ>c;}:H͔ʞwSyO gr# e9@LH~^˦]^:3a-L=߽5X)?>VLsԼ/ !L|sYZϞ`ei3R3*RISYF2́(B'ʖi5H yVʊ4 }{a4D!kj2cYfy.πV՘~TQ-QZޒ ;aB~t^cnLX5hcp𜟹 D3^+M~YID) +.2fU`-'$&Uil85Θ1 +!f֭W+#Hfum7Xw〤R*_-rhhS ùwjw$lZA˓[ֻ~)t\fgѫ(&WiyvZ%s喂/G2o\X'Ri0fx/[?r ֏\;}: |D+L"j rH0n/ L幙B|yr$wX#~ 71@mbZZ}QJk&ٯI"|\yB=_C ";}:4}[v\{"!ǻTgy噎\C%:JEr^ϑ_PQ_ټDBU_;bjG{%Lֿ+5.DCWdm'4ŒxP.)(#IE{fa(o)''7|kz$)MT;QL+-T,QλQ>c fu}aNIboijV]BChIiҾYvƯT3YdߖW:\[_Z6[y0e[Wpi{^X z$OHAqJ˹S"Yӛ^tZVXSPie>| ,퀧 O!\{qXoln^SxgE5teUH߅@z4lR1cFn ?(o{sLp>^5 .Œ&40o^x0r>gxOn\)(] 6ժk%@)E>m$p@Y/ޔ #ߔ5 !I0JBJ--FU9HC ;bW^8{ 邅l c Pdxgҹ &DrR=="n~\ eaʱNDv:w1UDk1 <( gj|=+}7 m/Wjvzflc R}Zee6V%,U?pP{5gMDiF/5|krN|oy=XlNvv7cЀˠ.:ga}\2z#:r-SiYN&_PU +"~x0 Br2/[-׋grR5`4[^D!Zrq n "^}?.#Nw_[܃$)OU)r[䮨S pȾCmi\* B@Aӹʢmp7b_{u\30X$#~ov\ i#v-p-[w҈fbë 7ܧNje#FD֩"6T>VO LEd}gl$`"Yڭ]|Ύiec5K"`XEPRɐ͌-aZe7~XtiPҢ%lZ{3C )LeaV;JjOz.3Bi&1Iw 9p_24Q&zQ<,װ2%R;e-݊Q&'fqͷ+=BDRN~LJ`p ^ݏt'9WK[gscZ95,z)1qSRM-adi<cy#DK¬Qpt:a/ANwޥ6+=.BuQS{:W! _Ԁ!;\wPK'mtG Z9lnLLǻpϭmEE)i YmIXżQk&'HEP-z ;GV@d$7ٍ 6oӫ2 ;14濿:2:;}9̮/܁}Re*u3DJ(g+ qw$iEZiK #r\ H{zde5A{CNqv~Ny\cRD0ɢxo?;&TS2U!V9C؄n%2 FXg0#ћ\D&J<V G`~}Bss"B/ȁMdsU/Hz`TlhVH<|/SAh]"ES-iΟF|TSL~66V\ת )MBOJZ}#c!p?4My XmGj'g&\Qe{-v_8P . {XnaEOYo{,ee?Qh+]4 [(q B)sEyM]Mo.IfeSO])Kpv-nMȉ.oɿtblN d}IƱ+y \Sf5 aYL2l ^ L1@F%q'_nO'J[5qV45mp ;qg!Vqxt,u:1cw@>}CumuOdKJWS-wL&Jd5<ɦW1D"R“K;+t K]wkC2m+@t Z(ۤi t˙͏pOL;>,Gljf z1qLfмI|P-ҙ |N*Xnl(ĥ ܭ^?j ?H9氃Ǖ ԎUEn V>w>5^q{Ի_*bh$)4c`^0Œ<ǟLAAN-jX o)/a?/XIs!sA߈4xCyTK CڰZt<ɨpE0h+ R͏[cI,m)cvh .S-vy;=ZBR5S;Z'9ѧ:~޽!Ŷ?G:hrah0.? 
* S̺N!0r%wZҢck=6y`jE/ӑ)KC Kܠ.=PrH%5s@} E9)J Q_bNHkt* aM) _zaa6IMbF+mhU vž/ٯiuæ]>⡱2x]%PvBZZǭ=@QqU?Kp.g*Kc/ gk2 -5?w緕p^ #uq7B ,?YT݋|$=K3`0mv6ȥLJ +]uŷ+s]LmxP}=+W ʟ~ުd::웭a'=bcumY'̎[lL}_JNgAtn\2|L?W$xh4N(YMソMUTV @Ѣbʓ_n?MmM0n> Yf}SKoIӹhqLSIoO* e"8cGKMo KMZ;G.v8vMϧA NKsjIwB-'PspIڥ8WD;#x(Z}{"zuKx@h@攅dO|m&3CA~+kg( z>LGCMpC _hUѫ͂€/ݲw81,:\kġ P+#BhRoÁ=u 0a*|x#NAfvmސ$߰ <{qXf[gEǴ 5[4&-)x(r/#m6CĽZ`xLf5mѾ0gw㺵k@ 25]6Ie꺈˜5X!uKd])7=p NS y f{{Hy-Pٲ9ilջsPSq^Ӧ&rqPܧ"ǵ2Dl3y/K#-[ErI&$^y:s3FFఙ9?Wƀ}iCO%b|NJX4DCQ|ȥ2J/S`J:jZP[-݋/E%b kdcJ:=XH1ؠdwnR j{u0#M@A cĄ&c]T)8^ӝ熻6?U6)[p|or^=nohkn8Y}VJ@A8}^])}HzbdlWҜ[v\HMlA!Lz*$>q6XEiՏwԷu@ *n,C+K%U7Q)J6|9"'Xv bGA@ܜ1jz:F'M6'8gZ wߏ 0 R>س5S$^1W8;Ln\h iA'њh&NEg+.l,%x!:vŌZlԡ>/bS.zh' ̿t<U;CO 3%(xD;iWdM,nZ|w,+Uqżh.; ZԔϧzR!At5޿m<)L$8wDXni""K^wIuzw.f; e Y[}9)WGWZM^[sŚ9I+ŖV٥y HLx:ncWߴ`bȥ(Ɠ}15/@^ z7p%As 2v^@V)e!U9Z t=Gd8{c18=!pD^A~Щ>E,P̩2pmW'buus1aw점L3jɹ+ vf &ٔ5gf3{ X\P #ydA0Z Sx=AāO$:2؈p-yo =b!=y[5@uTɍśEu,P#3`;~ɗv>w<]+$`2x& Y OGŲe*fwWdYsgc~ՇMFh&LZZ@Cݪ4?:=v Ej=mHNs`/׾Y [ៜ$'%gK5(ܯbeWԟLJJxUFyy36ݓ4)4GZMEx )#]v@ڂe1K8Xt"`:(Q|^8?Vb_s^3($#"~,eUTnxꔟlOØXgTk3?JI+SvspOضyRehb!vR^pA+U/zKZ;h~]($ۜD,0H7O .$񐼄Twp:!2!$)Q~diDwOI |yap9cs T/X'tʢSC]1}A8;.Cw˚':W_Ñ&.HDHAG37+E:#յtEJtLY{mG\G 6*^iVj+<UΌOK;2Nۨ_dif<jjޯ5CYvpJa۳v-;ZISeh qxSjc"sÁ=樶,۝YZ:&ى2wtKE[ >l J\(*t ۭ5֊Td.`)`nc41QoN8H1OOfd31SLUJf"/"YU," `P7bƥ~} $jl݃{tʫ/Փ"@^j:OH K^-5±kA`JdQ@Eԟ#QX-h_J4^<ϐ bpNZ=Fæ%N: _/rU]6xGL$lq`#Ekj~(WᎤNZ˙&_}o&.EC$dӍ>zE5R~2nWP@45ȿ7A1o-46lL4蔦GzU|R܁0jf,}lZNs}5s"P-l]fXy'_- h$8i6󵶀mp@!ܛ^eM'^H0rv9H=痰Xgx[]sbNg#1M70\~@@w3s4v>Ѭ"Owe Z\Us.Y"ИBe,N9`wCrI PANiٔk[xPp7b#V`1hk$<+jCe\C(HOGΥʕκt2Lѱ*' a[`2V3"ƆB2%MV);`rsXG >>6UrKjֱ+1,wo/7:b],.䑏Y>7=ο}axZ Hʻ/_Oe5C_;'θlz§|nYь`[x!s!D 7ȑ۷5d)/6r:ԩ& >A4.L`ۇh)q 76|+ΨlKDz-#6cTp@ Ww)a÷zYb<`(_PV0ݵ Xgag?ooR:Ħ*!J^uSyF( %nL  khE>z6u{Ąjsem /<fla": D/wӲT3.(͊m| aG\ӹa[ =Fdz;;1j9xsRjJd~VLI?l}= 3w$790?HJL'Np¼UGS ˁH w9{^ !PbQ[.'.s>o$lT" Izc1fW8LSj T0sΚpXq0x6}wa6'yN.cp208B[E1Vq|1XVb*뚸=Z~ݻ)UŌ^`@ Q`dNcԑj3:[M/q.(a4o!Ҟ '?DhsǁMwBPDl?9YǗ'ck:$D?[ų-q?mt9赐ҥaONoj{pI3>ͥe{pFo ʕ Ū).=7jǀ*|/#iy1-{7D9&pr 
J}]ѷƪl8ɘV>qbwQ/h>e6>xpwp1RuIx{Y96qk9ѼA`KK y1IFF]H?L 5;kE3Szluncڀ:0$4gM*\= ]޼2ۘISkOW~qRJŤkS6Kp )eR$o.5 `ApuÏ-yeZ,*v(=U;C5j\Gh0T-[H ;{}~HH_+ac_'A`җ+ {Ȼ__!q`}<_37g 82*Qߝ9WءyYKAsʮbL,%_麌l{x M @P5:xDZN7vR(6Ϩxv]-^W1XLUyŎJ@$rJ{H)EaJXn51d}ijװ$`+G+/C!dx, 9~ajkƺۓ:C-TkBWE4LN`)"omK8 E5RClėMf85 xإa&¿y"NQDW/F`"w!:XT`!Z + ̗x5wREZzxh7 ^u2]C;lB rPm;3ϯ qf/ ~oj4~Xs@Ckr0D@F*Ƨ`U0=Et+V@ҞD$jz7}3ς󣨚UDGm<q\!v}-x!MU2^CwD, v3œ4 5hJJuH )QkP?-c8@ӋxS$r՝CUG1;r ̉T\"J/1 ZM==(% 4\+=la ; 2fjx P4*g~nrvdǘF!4kDK,3zM*C+Wu~3D5D 0y,zWlE`#y+ ~r'EX'~Gaa=b)K+AXXcѨOK F]T]QwSH۷La t/,r)#גPmG&7OYpX[vpԺJچx Vbޔ'ȏqg+!ь~eRI೉)6>:-`RSbԯSƣQsi3Y #w`W} G-[($O9{ {(vP42h"nb{Amq46t;3ܜ(Zf0nW}wϊl /OPƀ112![*6GB *0PuK6gyx6x㓱dƺ~WkaOjw?g9 Tv1eW4 drPdrINvHsRTr{$II#2ԎDMآFH) Ac[ilo=ye3W(LI"^u9$׉ O;Ɠ1PfYw| կ&jc[*F3y,T[&{ "xjyGkE:/SZ&J@2W4I#,À؄i٧ܑ4Zb.Y\Tk E &5Pm}]X@.=Mҏ߼%%f/dePPsvAs%,) gH8zqe} ֎ 3Z᭍J~^oq/jrGsS~YKS[q}Ic?x uވ)V?+t;Ú8'y E[H˼  :&Vd*̷ﵙ$񊥑ɠJßc~}kE/$3O٨~XznXp<2$祟͙uu?CaLq@rbF:#tĞǽcG6ttI-1?m*{v ͏YuL]YkJE. 5u)ڨaJ$a l6;3\w:FV H>UC~>/[!O(9!ߘ~Rh>7}tƋdF55Gfˡ9RQ.05f91>*iDSi(Gp)AˆWR1&.bϒ^g IYJ@N 3(wmC- !{06<*î6,VgOT3!?`i,ޖmaE|XW1>j8 )eXalWO'LL^|%68$5pl&hhEKde>}Ӻ6//T#B$~,@?-?%q=g:'%=ރܓ.Gd0rv"࣫@,g#ex6j.κl ,f5sN~m'&=S`}f-49٬l}cdn$oq1lqLcK.KyH/Fn,&u *[5\ZF @+ڑ4]2e޳y)#Kz]{&b4_  RҖ`!Z`Bxfbkə$LO{ڵ0!Gc$P/]LO=jG;N.UW otndꎝ8J$S![PV};x]/-ì[?kIL~ktUZt<\G B}# 2sNG`a۔ ,[ ƙmA 2&sA;L;8~`3 s>t'K<>Dq7qA~DxnWK<&tXɍ HY6S$qr.k#Y>, ئrRVwXj"CCbصCq)[;; ^< aMaQAZ j~yA}A6r"ҹ1i.~ݶ\\W)NA;Brw/$4eSsn/Ӹ)*Rb IIzZ[GH))7Y5MI@ lrX+qS'|hï@;`T|&rh$=89Ίc{ӄ}uOA0>oߙgDX*,(8S߸?qn⎴;Y|EȖ;\*f KrܹIKWk |": K!I!ٴQ1{~҅ğ.Ts}[U7ZP(U庳l_ޣ1X|avTI>kR|;d\xɶ'_T=,lX-)t~-}LўvŶPK /&`hd+ (P}ҍ&Lk^b1N =[)'}1ĶfWah¤#I2GLp?Uտ=rP K'ҿ/ɸƖvV!w(/MP 7 ٧`hġk/tJ?2%H BjlEXR0[?I!KQXJkxB{l6W>+r@A{HrSzFV1/l̸jK4`U8U>I@kxl)# *$DŽT՗ lr)1jPjyjQTWbNT uB E :E((l9;AûN|]e{-Ȼ uzuӧJ hܶ;I ֙ y =ދIYHo #gԨ?\pU*J?HH ;0'MXu<=>7mXEhY.d]GQ^;L3ҎZEE5_�S%pJ4(j]t$5YǍ*8s % ,Z 3j1+,yr|~P˭.4U?yZܦ 4,kf[ˀ*m2gCfJ|\ dNK$Z%l56eD8efS!'3H[HߚAoN܋jrd/x H^aW"wCOKo[ IOoS+)tka|% +0!r(.YpFE^KppFSٌ'¤D2@hh#E=1$r3bR\{,4Lͣ qSB(N6 
CfH(KbCcA~ctd]-C[EgS|}6~ѷ%w g0gP2ŵLc|X5U}|%-"`-C#75n$7jXeC&B/# YwTLHt@M~L&e5 $hOVJ3T T5T&4N6yD.zSL85㟶w] ?i谊{9Y2vظ V&Y[jf\g[ Ͳ)_[&Rg#BFƳ Y\`C}νX90f*Gi/Csb{~.w\i9<51o݉1򈅢r*&M"s&`k %-#LMH"R=^*٪M>D0"ьl8Ct/} { QM1nw3&PJojfM`zBTՍN]3Gx;ء6[bJ悢D(ݞ&rW>̎y{M_.O /hj%D|=l?_Y뀷\vcV(Bq.3E:h/_^sip†LEdߜ!&pLqgWUEwQ(G_d\~ ?*F2 \'wMV\<89ߕz.873%dd=kV7\ |/\bЈ.忲ǣ&nӺ*Zᜃol%USyRóG=?9bMBI7wDvX77:Pu d Ds bC%Jj| 7s7P#O̺K_/E6JXR-~[d?N?f3ΐ% gЎ>ÊB3b{ #W$VزJWOieNȹ${TMmVǸj^]5no ZWCUց:6\9)c-2UV ] ' h[[>ځ4+f P͝thF艀GoCrd%6^T*$u7̤ky %tL S%HU4'j3nJ:k Ԙׇwⱟk)a6%~S12o۟M"d+۶wdgXa1"<~60 +S-3^==y J)'4Tk# H<0Mn2܂m@4x:aEܪqfVLrv}MSJ`z#c+@C#_צHR[.)OUR~2}+ {h.=WD$:vUՉ'^>өx=- rv0ç[Ry+ As'1hFkUl@mM:ꈡ yAT?Tnp@0SӎscYx_K(CJ {d=p 'g,pʟk1>ŭoC 7-^!QO:P/Eƨ`y.K)4"/A偞ADb,Ϩˡ/wwk[c "M"r.}@mtX7)m#6i @A]&MtUfΩ\0PVmth=B0$q.}97yD:r:+ʹ[YB-`?c}!iD Pe8mXc$|YswkW@ Io]x 7$ ͱ"T:F*븁HoӰQ۞ZT)|ijrG*)E$Gemv\7˴(xz_`i~]C|2Q̧~f){#B?d1"_ zNJ$ lB6 >+F&\FY% ڐK6X7P_XQm֎I kP…s{ ^:]#9amn&$*(z/a.ƁaxA[rfuO.I{)}gU}^X7[cr :NeqiiKrKGUtw$6txOlծ.8љ 'ƒaG3 *1Ч@RB0o a!Q;k#Un@grY։O6{c@tsڀ*3:,OV6+qV|E1D~|q~VkM_O\9p]@.[EKFt}}Re#Zb3''dЫ0o rf%9NvqÅ՟5Z214Jr![fDYPՁ,+L'uJ/@w6WC?d/Il՜ՊS-@2:/vq}8hÏUzkjݕ/pzůEM}Я:穧Y>=XX[c=keB Q_?240&v.: :*Y 0`LH8 p&ԁ@ VL^6>t뼃t**),?0SKXR>3}o7AɐGm`o;8mVa(zG\a`ݫ8%,+bTGM8diSH. 
d{rL0BiVʆ̮Cj46X@b<e=J mn׺j#z)n\ f^Qkzmp)l_ߏru\p}b@.Ms;~A@iCl5pcu:?G$9bfYѐ$b)1)ƀ`*H-Υc֒*O*؀ϩyά4z2y@=GDN HBI ML]S̢)KU7$PeU^8on̂ؕZc^ n6oCqЗDJW1uVc.(jk6Z&52NuWTuˋ YggĹ T é$,&+V%;Ns9:ٚ~`0s}h ͋ L:>*{Z@/7CgnI .o݃.k'1ۗ4yࢺ/`5(.β\ &M7ITi2M1eǹj@S$^0 {捽_vtIR-Tص { v-o,VEcbzfp.AdWq%zý>aq*;UfϬAvR50t 0!>;yA!οlKGu0?׹6Yye[/fPcL[6jKXLB@s~EkȿFߎ+@a)VXˆ}Ҩ>\M Q;Hu DxY)2Z6KDfSlv2_ҞzƟ}cC>?(b/ͭߗB"\{{iMp>TzcfnV9T@Ä6"ʣ%#[Dh$*%|̪lkjaL2NWL%W6c5["i ,S}xI˘֠\k m{t}S $ o!JD_Sm6(p-+o e=-/'_ j~!BcͺIg!MdЀqrҏDb{%K~6Z* w6ہ/qZn*!7D}9uwx1Px%,Bʸi7);ʒOVRMD;[ ۤ߱4ɇ1qP=hOLծ1J)tz?n" `GEo0#5)\ը; >a F3eAxmM-س@dy,q7ڰ|i;EחkZ\q3 tQvӋ&j': mgٷ8u=yb*<|B4~o%:rucKB$GH 6jl*f3܂a=͸:Q5bJȗ N ov)BbDIiuSĿ(q|]dN_T^X1W}tߤyԲZHHC _ƀoL ݍ6 eVoZ~*P0 Kl\d?'o&&&#e{oMA\(eSřBExo]D-T0mbf:})| /P$g(#O@VLNT.}_M1oW'4-A;c1 z,amI9 A|,b ^žS99{  dE֤%+: @xu7nRS =pmLU%LWB=sgi-[ ZqICOHG bdmoi0i8߸KY~uހ5 itx\K+n1j^y&N^歮Ql{+ݲ$@J_el;|KjPQ5o C{s\ ng9@HKse-e&5*'=p娯q}0"7&钽4ǜcV2<+"Z\4G7,N 'H/ׁ)pKDa@~/L9@ι:1>B=Da x ǹ0sQQ8{#wF+Y.pFacؙIr~zAZ~03.=‚y [= LLJ 6/8FѢ$HLނh _k(M t8mrZh@ke9*!o~jWޭۚ/2ol 6o֘q?͈9Z~8' WEi 2T 9")|@&4HK QvǬ3uU ^Z\;2")}jve.[tR=ghŠo^*>\jTN v '~.m{^h@ Xa!?j!2:h PoY4TN$#mPU:Z?_|ɀ[va NJ_B'DP/.e ܖDY|‰],H}/panvaI_4g|uGALA I˪vhbr1)5!{ :iTRHt*M:YSxZ̛!Y}L0zk 20{\'[PގjH LLn';Cc{=R<8y,y܈Ct-_DkSkxnVbEQG@GR19L 4!p.x.h"~L؟frJI쫭[yiv$ vpb?9c^Yiejش)ԍp:&_QfdF7WݗYT :jtL (,J.@2z(l`($)^1_7vZmcM$rlZOl' OoRI>+{VB^[n J}CGF<-6:Bhܒ]tn!x V]ֻ|]AeW`C= 빼xl]S"sN$j~c~ZpAmg0Fذ~[沥[Gmlk1fv}=V[~a-CՄ'76( R.Mp0ҔF _ZGS{zviR{5ޞzOc7m':)ܽU1ۙ쵡˛xW]'/KF_uiD`mC@OnZ,|DzT@'kH+#,ƣ.:Ő˽moGUHL=i{ &9$\ާM{U([JsN~~0!|pSZb99R ; 0IӞ'FN%theY-_K FzsJj[`c *dwK{A^ {΋3pͩSxs`;2 Q8hԋW!ҕSRbR-Ojeӡ ~bO |FncExZ_vRi[{lGjʐ<Zg2$~RXl+>Hl ?XY!FRb S-5g~ԃnzsL`(q)&'@SQg7u ĤٚMhF',]0 mRF4$Zne}p"Ildl{l$-;y<2?#UYV]F'&)W+pN1r|](U;KIJ_uT姅u]u6*`nF ԵB~ j?~A"t(Q/3v)"2 . >cQq=d74ʖO )YX@8e W©&ϷW" <Pnb1epyD<+̐izxW~6 eЃ87g FE qU ,d+;yO,=M! OWyMC6TOR/^f]3~ R~'\;FFRj.XK΋5WB(#v/3$mJwڊ7FI*0~fڑTWAIÂ|Ic! 
qBw 0TwK@IS)}c|D) [,&@̞;UcbxW.=_`՘󪊚,6Zn E^rx.+Θ!: 4+^) oN^&+dRFg/xփ?N/u L 7m[ ߥaWj [\4ebLDtdeJs Τ˼ͮ7 U)cl.1Ra{FnPRCA8{>pY~pRSEC^ړvgGSg[ ɔ3nJ)Nn<;.<gA{[䨍^ ute*W҂Kɜw x($Ẁe63BbC)z.v7_u;H[*n8kHztgvs&ՑGg`?R8l×J i>zEY݄X~x\[l$GϙSn+\a^|T4A*}UOסmns\'/nH\?Ll)oaj.+h A~Z3̞X=E̾ƌR$3ڰV=ޓ'[7G@oڸ;$!eh0XTT*T2Hxeh4LoQgJ=%ƺ0*/Oށ"˽dR~&x->U4C[B@%~E}TClb?טVrZ8m̚?wBCtvfpQAu @+ 98$VM쮮!Xi|i'_QKeٌ(:o!ܡwXm#pNm}%A~Gi􀕰:(4>[La]N#~XHL_+B`E+p÷ uh">& ѫ}#Ԁ`G jV),J(]^ Š*B:$^q#2g='NK ]]Z3[4|V.7Iyki\ 3'>a6?/iK.\F hX_76Gև c  ָy yɞoUT˜U9HRu"H7$uO'IЭ? ר` Ox޼=HQl"ګ =u='q҉B5$F"lSK仼%&vDEoBsű]zH ۵x8#'nNk->Rפ,{wjeyjd͗TkرረsXQ޳CW }H" z߷ATmJT%GL.V/.,osVHQ1s4fE?\[$jw$(˃_F̔\s<~&|8=>- !gon%>Eѐ3 ne(2ϱ~4REMDjV.p6cYP16M1YHIU&mKg @9Ei`3oIEHkT_C#Uw)Dy <~ D 6MOj׼`Vی&W(M[_vZ(H a=sm3}sK>Tڄy*e!rU0 $r ͞/rZt>.O39tm `ٱܥkIgy/u1S> y2t{EfS91 ?sb*e㧷 L كm8,q5=xy-p`֨C z[ Z5վn82O yeRB2kT]=( ,?J85Ц8.W|A=0Vӣ"v෧>g.g 9gWƃz!X'PгJ7DS1*0E`NC»l+0x15ṉca`w)zDKH=Fy|YaƳIU@mj4ՂEd5+noa~ڛȇrfT].8o}cIlf*k$+s>ƨdD(]N HggfxbQO긞@F2`,ewsV Rէ1]`y21GZS7p@;}'9"ȯɱK<kdXxp|CG>2Kzp}`'sO_]U3D©hc䧽\_@l~E lwFBN-I3wdt Yn!̳ݢi"59kvyL%W؊,GW g J.)\!0iO9=fOz#O9yB&S?Cx ƊT3-I2zDQR$?xPϑ xW +t:r h⎢Ծ"E9`t](JG|>{q e8 n"nnc,s[I0UIaݸ! t4?SYa@rR/&Jc [m_kcIt R?.S_2 :ڰdH]N(xf~./RF(ݩ2Ac'hͬ!C/rZo u4^eqDI+3 2ɛvDUK?(>bNq=twrxq,BomJd}9 1-1Cc\qYwiK Œk̯.^oԣ%[y>1N}ڐOutسB7%1ulQГdRSi0ZPLy_AXa)W'f&U9[6`n:Y =_gQV#0D"M]'MYQ 0f'vR&6E @ p6k~yR ږ|zN9oˮAQu1]t"NA!T-f^zuV! :[?$Unj͘<oTkuFɱ3T͏;2_Z廱P`O*Z/Z_(lz8_Qύ2*>tV+J!1q4 jHRee4ыBU9IJExӭ"$1)vy_⯨ܭ1ѿs@&HLc| یJxzv:Bgd.t%GZKe49lN V߿j~\TCfT4b)"`zyDͩo7ȱUH٥PWWXd[A _=6^S{ڞ]!1Q^k 0ԗyUyXЖyvLFfյp]gczg8EE.gl rqIX#lE(%vᅌ8ꣂH1Zt>jáXfg~]{Sٶ jУ., ź=pp94y'O>#ղSfa%s+#G9fa1.@ư놦,5xs*EyޱN$.n պ?]F2IhrA5vuM/:䯇p&ɴ t D0iڽМ?N3%W"}Y Nj1NHV8mlUCJui1ڡ\\SF;i`po;Ɠ+ TvG7z,_QL0ɤ {褼*WO;w)kID%׈: ߘ=!ԧ\ J(ڦ>5Z2"F1BZnҖ#䀁Gc],qսюͤ6nmyFEݸ\ڷFOUJQʾmt7)jCy xufLsvn&TS|FK,wy(V . 
O)h L7H4z}.Zf Plnru(vcQUs>=/N<|!g;6PtteKy`9v}npR.ݶ^{Ք/-&qGt) ζa4PÔƥ14)COSj T[^4x=F"uov1G ,6]c\v]_%X`5Da O}-F 4,ƍgO(*ZEVu*NIL-[\`+[6Di7:5sB2UKi×"ЍT3L[i ؏X~sLAf-xDMiÁj~lveZLղ:e?Z֔DR./֑GTW  'aŁzp f2tc*3GKQڊ»xgmGƾ@Ey ёtlM߻耇=)* 7W&H'QʳW,2G/.IեAJRwa/SrѨUE7<<0t\Y_](zlfb;,}r?D{9 1M WoNٌ^%~Wo!R[mq(МNYӓ%DT,bT{> Opgε|g/ӅՉ#!u} ? <@Gks/:*g1QCO ̿< S 7bAI Xc]^1ryzk91w0KD›.wšLo$IkQaצXj#7Ѭ6Ɗ)F;txG|Ŷ6xEiϢ'P R ڱ$Su'X~UGC0JP&;s_y X;9ϱ  ᶖ:lw0#i))Fpil^so3-t"+R͈ "ig_iq4p+c_NiBWO63izå|Ҏ*̚279xZ,:waV=Y3;.n-_|C]ļv#gm: E H/2աY5FmoRx ;EYҨe~kT>E3Q`.ڼZv*?%c>eȬ  cvPX̥'8$$r~9X0Ue#F:6 ھߞQuPi(3 ~A9%R 2ti|#1x˖j}B"8R=< -I pLЎ@7< wbfOm \_Z+!WI.\1Doy Js-v{|rtfVe6ʈCV{3,A۬n&/sߗL8 \t hU 0)d&Jޑ+G>5?&~[@{esr%F:ΊYdw_Sʈ?AfZ]Q\&s%)jJ;jB7aOqG>DcЁiyIVO҇[HMEb\%+pi >;Nn0z>wH8 p.>BDxdT9ef0%AO=ӏ2m,lZ14MɕG!F&:bVeۖeiӧeQjm3|Rm{u$I-ga1\;i[x#R H^qOz,ukq2 P]Kjocaڊ0嬈&jo+t0o!w<Ϗ= }2,o2@gJda eЫئC br̮a"u##SӀn3:7jPm5|AAFI,^}QFCKP%D쁝q?02ۍІYc|\dn~ =*q}r@@SY Ib~<++n/K^_9ѕg+gDZ[=Y,&i`Awk@|0i]( 4Fu͞>@ik{2ܢyB(B.,JS'q}=Z@,smڿБnX3b89}TmFtjAT6{${03P rt"+34xHAJ9tF o 3_F'^we[=7[aDBpe*2M,9&j2vN B܈ B[cp5PuqoNtDtJM2.qɿpHWK?s`$vvܼM^0!0Yio /`s26a8ڮ%fc4&v\a1mJ[ŤD$Im&Q8ҔHȐ|-E- +4=ؔfGܶ 4TV)VJB X?⽍}kgd4KMKDl"sq=+S8Wnٜpa6_aRP==W呝$hHt",93n,w>od6K8 }|l' 5zh/H(*UMü%qbir3AF/gb$41D9C 9#(\!WWbG.58YM/R=bz YԄja: %A%M%= 2vh/O=?ouw?wIݡ @  ߻7G4Zfbnf&G,6*&7-}io!4~g ;A͓:+2sxsC&%W#aJ2ytv\ J0#WYtEL?%9St=f;lB?P'FM"mo5鵻%I^7^G?C;=R99a*ב:-RFPZ 4!.xgvq.ҏdV@@8E}3}SƝsͱl[jzX-:J<&m XE'8jB,eHl&/B٧$xDEjM@N%Tw$g{7EFxc?b2Tj7:@>/)e>Lk%nϣ1\ M)7N\@D*NHe$]TA @yJ6KUAxe^)["lo8=^$/U釨 [׿~֍s"1W9LCE庆ć2Zd>DFUT"IU2nDJőnt-NbߘiO%OV!W6Sk;7QcV]5@3Ga٧2Ab0dQZp+E$ pOq&oFL:b/SRls[[7ֺRaM^#g2N:j,Ԍ/xa#=Le zB_S@N,Jp~NHv]X<4:uM˛qȸ?F k(}?%q&G>s8tyPF1dMiW:PҠYy{kZa;&A6W=8Q3$s21K(BlW>! 
=(g#}duo$Wr2Isy7=E]l[*fJιC ,MVcHa.lŬ~fG>Sz7MM`!e:qڡمcAav501s}Yl|ﯦ3Nh^z {33ۀVr6ҰsòIo.߷WS%)&% uG߃;00nj:)THhgC;峒F-X5G5JLϑ'D$SyLp*Ҧ?nMqڟ #ɢ+?;W>Ag- WƷyVB N~ՀyP tFTej)@rmQ'**‰,5/şu /g 3h(be.7ӁVN%Z^%2Ȥ).5GoqoNl*^ָ\;^ mV֗~{[".#8}zV2E~x</AP6rXdm~ x!M24']&&(KA2E$ l ?t nZ " Qr j\")3x}4BpVH!/ cO/wF%c^/f849Thָd/Iz:hʚFfԚKQHߐpl;α*7:eB6u#Hveʌ_4^NZ:ч6c} %C[WQp62Vx" P )8t|>3lTߐ߫Euڥ]Hhәgf3s+T?m0XϼR\y`2>\+ЍU [5˔үoQ9*Ip*Iy 9G\G+PWV q} fo3@W"}tyEbD2FB~T5EGbSTg[ct?;dk!yL[8A.3~# ɸ/ EmfKѫiyК7 /DNxOIoeL`2،vj$M^0EZƶp]^yO؜v/~*[OY^p#_rĢt2 ^p{B1`׳Lo6~0T.16F"Tϊvk{D+`֢<?$].a* UZg$]>)&_A,y|+XL%oAlQxЭMW2b9vfvUvSmy-{1'8-% Bm6?GԤ;5%|?}xqp;ȼ]\KBih="u &Pӹ!%,5>򁤠)ӳ5 ѽ`Nz<` sRMy}D+,I8Ҏ#{0Oi)q(W}>;j-ՙotk݉KJ>;$+G5&Vpv)T\F9RTɸ&& ObKI^ǾuK,DKwYoihEJ.?!>O&q$U}U,,Yͧ)s0pBA>}plTd%Q@&{Y7QLϪ)؛=;*kl|A-!t0(@KC!"oCMVO4}҅r }< _9 +d7+OR,=߉HEDVvs>c~ָFrV,|qPz[EJ%'>W$ Jz:1d4v;ډl$Q2z<ץra(yac[AT=GK&~0=gg$r= qqZ,M;<8EalUbA/G_ܺ6:E#Ό|54y2?1$z?׉TJ!fnllة*^DG(Z8 /{QOFèӊfZZR"CNICW:tI߯K ZaQ>]g5i1#,V1j*ȩkt ϡ@2i\X׷)DvE$Y~@ W~*5tpvr(X~ =':͑X&CԿ/T֞jtJZ;[GJrђE`GFALc: fq)HDhd#&js,n/)5 +7)ݎUGFlN'>^2@FZǠZQ.,˪DÊJ &af`7^bi!!0\-j gՊ(0Dڏa#a3@ޚ&V̔[Ј'xBDv檜z;w63s6Iӌ¾񶸚s ٛ`WpiՃJ_`+AmUʷ`gdM\P0ɴ@^kEQYʈs~/43%2ՙڒ ,RXSM:pf=Ҕ:)h4F3M=c{a[DEPf3.\꣄scD ect5W)7 }bw恨 m~,b.t >ѓ-eΐb>ZRlfQ yl:SUl'Q_{ UȓQ 3 ^J_V* rX S~zMz3-LtV*1G9 8aO VV-ĥ;47I]@-QE8+4xW6 q^.4+ݵJWʥNxdf{'55, K m^>EoA`IТd+/AxO31s!-I3<-v>XڵR:pեC8<×߀zd,,_lFnz2ZP܁F<07_~ab& 򙴘nA!uY OB:25z}Ko_5 8o%aS p܁\anTb(_C %[U% ZhJE2?>U{BhCY؞ɵHi5NceުU>B5Xtٚdv[S\GʈTY吥j\x?KrZʦ} Pk~HjYٕ<B zFnouYc(Q P֢V[--\Wn/(vbYDS+קOUi.~I` di2[[mf`6؀'w@+"L Shܓ8,p+mX!US?O:@u8?1۹5HteUSכ@@Eڎ FN~~4PLyz#Hc7W{ *+Mr=F *zK+C`( ĩwP?# ZqSgtBfߧv_] TT޲ mVӋf TK)=a .3pQe)x@u'v1"+wf,Wh(؇.}Hgw5:Yj+$.Rv~XUpCWl-%YkmJC;6wCr޹ʭ25\Xvy081C5ш-ǚABk*m^’u'sF\;2ݧ&Aexe(=*c͖2KMe֚eVcNP/A78iD/y ‹[wR OLͫ_bȯfT*dozH$M.b{a}Ƹf򲊠r9XF573ȧCp)Pk/ɧOF7:G%sV~s{N-`M&c(N R()T$M -þϞgFnE|g`sduRTIWbK!V|gߘO&e P?kUD%mVB/˓LSgyVl.Iz!j b*DLJE *N9>ڢ:A{‘Hdv :z2r5>_ 품o"n]/{#mں%FN Jn FC`bc B;-S뱟d[ ,H,B_TaцޫO-_}"RטZ3ߜifOºْ .i&@oN<W\g[IAqҭe dksy}bόOC5޷Д~ h! ? 
k % UTR2h"vgI{c&S(4@3;w~3[$,Ec56,W7@$10ռڍ"SҚ 7>dE9ԃ{v@^aMKhpHa0&k^شYJ3@Yz&Z $19c=oMy3Y#::hyC-*yJ-G^6 'RH]ЯI-m1şi[@] n"MX Oda |˲@^gUBg_3$"ƑBgZͪoCǎX$x~bz,IS N1 =kB VQ)/,q#V[[-cbbN8YB(\NLIX*t.pZnXW,-mb}AIwsH

T~x>&^Kٽ:)oޙՙ7RqWy"*_^Uz`fl嘅t<oǽ}OYs$m\eM9X6KY+-qA$3. $O|M}a0<m燉(@Z)S\}|r#Go-Ǣn.LXif,h ITSoX,x.4&YMg3Ze#~)_|܀!!\:`͞HrQp5]8+m9zz[G8޷*ߦV6ޠ1!ncָG^~ː_/S8 D+NW#9W A E?/{vRX;^G.3D"1}>Pa Mk6-ۼgS[S͙I}^/NDI<)bn)B'{C4XfxE`$)qD^aHH L!Yx{A[@#%n/T5gyc֓rqd\,3MA?4dWG~|Z$&3Uªx{QY вs&}*,7^G]˅S>fL`sUgYM}#}ܤ#Dk עdK?$=ևTIOi<^I?<-5RdhNW:ZGx _ϒ|cRӢ,+1d.ٵ V点?o2X+}ğ$u יqݹeBcpf`ָc$![AWjI #Fd+GOPerNUBn@!z[\[w6Ϳ_얶G@:S7+ö=0[ !NH#:_xt\͑euԞ-MtZ1ZKš|ڶ3-*@݊)1o7[:T<ّ +{ |r&gbyUU`̥zO4 92z" /Ogx9A@2^f^3jfLϸ/H̃|6 :H1Y3 ٩Ey̴j_:xGG@RQ ه3 [~NJUAUgc-Aeեx\| fU3gw;rT.ΞȩVEER/TJd.$\=|GTL_|nY5"+f᧚2\kK׊lk"p*GbTHnZg;#pixM2D)OB*Y3A@3W5K̨([!deL<Śa#reƓ(Q WHHS203zJv5H|!46 IW|k/?G*f<]S{vzOr.~.5n)p fvSNvP}H z\&q_QzR|9nmoِ!>n=OulUH9'Y\ x!4NjH8T0]N&_˷9;# |韕 >yZ6A {9v[K x a!\ lh פ鮌 fǎ>'Zvzsʶa8L6߱++oUqvx(}m9CezVt1QݢFcصM,9)+({4E#+⏷j`f%Fj4b aq_+p[St U Ǭ Y<*TScBs o!>A^ca!86_4)"[kH1>./I,ft @'nwSL:\j|D )E)287R(!YdCJhGڰz߮f{aJaڮUD]b%*^%iX-9*SD C^Ȗ Gy0Mqx]!T]\,~eY_SLNAoSQQ';Em`[ɱ'UO2gjWrƒQ3L:NsVXLX"W\Q☐]S3o!/cڤloJ/bn> 1:>ɧ,EwB2*\))sݳVW^/9@0qm+7ړ|] 5]UUmYt34O {+L%l͏T0SLLm'2YoU;H_%| &LNXSn:ȇ G7r6_tvZDۋ͋><)=([4͂}QN.)|ប#K@xn:LSP_KE%Ƌ=|vd:ibN?rW ܟ9#}e~nNo'Q\Z7L s b t0˹d]l ٱ4bS04)0R&粕b`>:nnHHw)X_f"PU%3.C_ ״dIQYjewTc;V.uio%L5{ć4Vy}[״sxqXEKl^m٫?oyVqmIGUU댚5 gJͽ3@&WRRQzY/{Qq%s1p!ıick`!:&#_ Zˁ(MNLeJ D9x(ܟ>N~M$oa@]tERِU#a}G=-&`x8^ifu/t yX1,7$PFrf W+߂/@!C!P  7y4Ga׵!?[W#!ٰgvee6R ݚu.5oF ]Ij~K ggزk2Ak_i0:d6*UJ|-H^ vqrr>p;b=B{5{D.vj}jMHAsr&I=)ujxrtС`[J%ZmVD%A:ȦR:85,vvVY+}IZRF|,/ZDT[0'`DPscIb U%46qctjګBafc7Լ Y=.qs1;žJ h )%abr@ƠsLzJ̑vÉu[^Ũ3CheuSKC*bQ f^Jj@Fl?l."Yn?/q}rd6)\tFqaLrotzAke]abTF} 8]'i.k"<ԫFr%d zvxU\niJ ŹFZ&ZO7 U ]$cޤ_17bUTlWe* a3NLtc^-~X(plݴdocׂ7}Bsyޯ~F=&*nzK8:Cai,I vNFr31,vq51`ib2Lyi^;1ʶOo7*M XВϽ0 w`;ȏ6"D (ZmyČ}U=9 [=9o%YaM䙚`5޴](7e>,x'׺ ي9i*N]@KsXwln 4jgh)MA$)J-Q_pDVH][pf(PLm(G;DAj9R6(D’ϴG)rԅ/+F0. *׵51_•X0:׳-م(Αd&f4 G`9ns W'7,Ϟ{[Y6g?Q5*L `7y89pB-ǰf[O'` *K7L֞BBvY $TDǁUK ݮdeɄg."/ ֐040kܐv<ܨq9a7jĬ;q]WO%MN I8DЍ…ų+ǣ{EgoL;zG5/IJʿd6}quJNNm ]3;!VM$XA?%wpxݱPSbW. 
:&9Ʋ)ޛs>zZe3Eg?oɒ %+z1[ F*%Tfobjw2V/{r?׵PEQ}{k#&?+eZ<}1x V;l_^"r{Eq]^OcV!EAH{⭆-#a`7>2-<#1b=Ȕ3 oU?A9x'v9#~f>\U>W`$N;^ʦ, u;ЛBOm[tV<85IS&sDʮ8:!wQ\:3n?_<:T29кJGf­+M% U0'Şh6>)jetxޔ~]Pr=W?jVSHn+ldKNE}t'$WPOm9NSKOXHK0891[m8oy׋Н{6=1.};~:j;>w`&X_]u.A1 yM09z+w8(ܘ#hzDu!7FUǬ~Cx%H.4 >vjFĹ f-Ǩã10rq8,͈o7/ OjR=H* zb{cfaX[Z89tWǀ'II| iLVT Ff ǩ<zQ 9P]8_QH/ʩ=Æ&Üexb&=I)8gµw&zC^YK "$s#c{^8!}H!m}Nk"Մ#AM.k֣@lu\ZFu5xnO=5Itr^О$LFc i\an{Il~^\'p5>^Ȟ:<byW @[+VR䕈Ѧ.2ǔzzT"+! \/Di@FRB/Alit~>ChX$J_z:1(Ƿ#;;e.LՉm+MOyI^֔/_BCEV2iAz)MumNsREdv6'+P)jP+7dՇJP{\DM!a/e[W7R9FF5DVN.f#^Y01XVwWҸ H[OJwvx_38V"C1\$ x/u75 2#yihkUeW:բ&aCӦf6)X=!+ |#Oal|; ~@5" 5RTH@ET}Ѹg6vT:\Xm$(wlAnYcLe-!MX|ݢ{?M(+Lenp_4BwhTMy*HL{ۓ˜ *-y2$ֱ?ůpy0{k&b4h0̃tl>8=F.ѱJfny>#;h5~f/}OҴYdȒº cgK^1cW[*#n[_ͽ))`]vSTzJu11V$k\LՑOt㭽dk.brTF8۷S(GU~5ՅcT9tWkœ׽cT %w@u26@O,L9pŝU$[kDa;SDt4ᷭմt?RB+\^+c~o|uz5'ugvHtC[!X004c8g(ZÙ˼U!+"׷a;Zjn7j 1j'c"p(,/xk}HkAēsa/h͝nb@~*he.5ݼOb4HJ# X1zu5Uyߎm{îJ?՚FKXkcNs f,2O`=DrٲKF̵U`(IF7Z;e`lE OYhEg56_cW '٘ mʆ#9{rq0+)xv@++:2Krd'&89ޡ)oMdlqtt?ϡtG~K$܆31*$FXM[r6d湃KъixY~ Hnsa\؁S60Q(YG(a#+,G4\2ս\-MRˢ?91.N2y}<=~$83-1 H~zXFlV G&!A!EV)<hܸ.t~|y2]e?6#u.)D ɔL8-VYa`45aF<>D䮓slB1Vσt`E GSz Mw75ʪ ol~t~^ #3e~= 'jZNs=_VWk |0Gݣ8u#!f򗛾o#s2<" @N于yXqwb#äUYT@vUxa红%ؘ[}N,6TXCG p(V;8aztTcMxT`|Z5Yī rTL@mޓWPU<swbUYY$t1s'bA3-зdF2jALtE V\}~ڿV7w5:{VVN/tջ%[hAo}c>{I#pA6ܭY-7BgJ3}J(i37X-- TwhՠQ7CpEOyhsآc%cuZ x0>׹V`Rɵ딄\:>ʩHk %:[1_PqN( n:^{b ,qWQ&'oM4%RX9&[OL`#;+^% i3&MB਽OcXPƻ>~Jv)M+|b`ohT [MQeC/Tl+P/n7#E<W/r٭69L\7219">)>7vlmy˩lW툣}:IhEGa-,s~qDd/5O'@)CITFĜg|8MP 2BZL/L=f}D\ޟ%KŌq(= 3-@ӥ,Q0Þ]p/ xCj q3A  U-ˡr׎ vf8|ĴaAƁ.)?!~c|EP aO#+CjUn ϭ\#FT|j/jS+$LH֘~LH 5g^nW+E+ɵ}GK`){]eݵf•o˿qz`vC>ʚ8 X= f (ʲo0Zy<5آՃďjeNYG7k234;׵kT{w`r9uq0" ogxUTE)Luv, cKyu ]DtDxI*i߇uZw={*l6󙔤v ?caX,,~4Xc!V]>G62WBHdLEp u3 UK) d,BQÛ1ԌX'Q^ѻo{ wa.[u*h`S=>Cfᰣ2މncȮ -{\1mO,{߃ ,MkZT+_ 0otvKB ֹѱ&SC$qEmHٮ7.:u`+H #̈́*ޕ rq2M4稇NfFNӻV 8мj&تlK~ţP2J/8_dс5ϩ[<Mw%&1f"|,3UC.1}x$Z`kCIntxQߓ&eҝ[hW;W׶SwϤ8OlhRu4Ò dٍL!x,]Ge/ZZˬ;7*\?--n ZԺr"w/X_43ڑF!D~fSZ;r0Z=Fns}\=̷U+D*{?knՖ!qCka] vob1o1O [rw|hʣHbveX>ᾰm>A9/g?ҶnUy*ȇG|# fttJ:ɾ9[ a 
(//2ja]iu"_1fh.%\צ!Eb޺+%;|MnŶEżå8ce9VG S|kV\+=&Zź2'HVG?{T;<9 x%y7sSd e84BBZљ{H>Оu2GA>X&>>;UWOc˙|4JV7=4E𒵰%#Yx*-'kƃ\ z.`r:xvͤ~ 8F"Sp_@e=asf=2a|?-:t405?*nl.+y{v1"|:ye`g@glG8eDEN-tX~e7KFNfe^WudHY!u2?uOb^lH^ӷ)6%ea`HfMBJ4VSŤ_>]b_CW{ 7ˇG3qNBAHcQVwg:N_cd!g!2? /8ļJ,V=0cn(xLf 8'if^Q3]8xAWgP -RjУ5QVܪ|] _p%I\X>P놛&i'5ͱJ5AK/&i +!;G!? ) 5ϵxv>_ZTʆ޳,Mjg$L1Fqg8.e`0o1WKwE/U5҈;'GvU:/c5gaٻ>{] )0ɜs-m9$Yn|xx LZt w5NQօH9R߸htt=Z3Fa҃0^,ďA5\AݠMqEC\Ok *k~jI:$D.E.h.b+tl$d|-]ۘ..Ɋga bXB:ԮA48U]W~8K-i$sA71i`(oazK2ؖ{ƣtή#]CdP" n'?g$xa؉xvy@-)F%n8t*_6Aϸt5C`lUqi\`D Q IIcBcU:+00Ċy,w#r[FM*{% # T p{ׇ[;U\xMnvfCo97Km>OHd 7&(x>MW~=ijX57w>ڔc>lyF'Q^6h,I~^9U@h'h}fɎħZT#o^#9/\X4yyb蚦53>8Tmٚy`mH@\f㧤 U11&=W-N6-9aj3,|mxb(-uIn AC5̝) ⨕UM2^*S*P4SeMonkƌړ= r_L5)F,E5tDE)o.Kӧ héQ G6ERH]Xpu7 tl>2\#u9]k pLFG|f%TDVQ,W *&O*粑,Dp`3`e(I,83"鍇~S=])VoVY*arma#Ly;uUpǦ/=* hD|X X>e;[ҢI1WđKrӠV1LH:osXw .xLsiL&>p{~؍mGNKJWy_lk#- ^O)DkѰSGXfГmCI=6kA( oXQE WqʏbCAP^GJge9_#a&MPd<*㞨˛}Or7`U?H_(..3 Jlp ˚ɎvB^W?e#-0@"l~[򙉅K*d8gTBB7,}t&h\DŽv- on%"[$r+ܵ\ h-%>9p-㷆>=4YFE;m=hBBJ2 Vȡ>Az]d_ݎ]P;.띛 br@@6#:pQm0{DmL 2h,abCpHQ wfnӎ,/s=J)'QB(C`&4'9"TꠟFZ@[)dedR6O:y&[012}ݨc1}y3 PPx#x[c&5;p$QR7cbYAH){ۢFd. 
Be`'ϞَD2TcOpu0HMu%PƆuД0?,jSTdPR7 иaҺ@E\ .|וeqc>8b 1^pO^&NS,)Tw)x2H|`Ơל6^$He{m]c/UM|U>K҉ 6^۞V!I# Q4Vw!دo4ʍnyl]3(bXԎm VR͕{(4UvcP_HSwm"fޚ;x>+w/{kq陨kPS\[+e@ڕX8%ԥKm3)Y/(hd~X巖9b _>{dv]!g;D"AK۽uboaגc,'*mlmEd~ؤ\_+_d%a菷'w@|ncbqK?N׳\%cE8/ڂ vBf~Jd(KG[ޙ%0X[Ob~&\eryB`LEAIAbm iQaaS}ARv\fɉڲrc3ug 7L5X55krROߺw즁*{35fn/O?8ž, {3P׽C+B%ʱDAsERDN@i)8-={U*̵R+Jn3 ͣ>7^Z>qSz*y7ǒ8[[/>sOo$hDi;43JBԹ?~ ]L]o\퇅>Ư_z-,Vd-u =*#_ wڦDvpv - XӉ:^ڡ0 [I>r H >w&ܤsJ؛(?UƏφ!:K~њ nm!]=rO%[*O; D-fBs~P5yK|ik O7~&-s~6^Dg{JC6G@M:핟|miE:Xd%N|7NTZåcck撫@;-siELYӒ5f3(%o)u[Imu]ggbK]cuZPOᇖ C彐l €@:HBL8 >XbD#}A弱o&>V:k3j5 Lҿ'hދt1T;#p9P,s8-}}A2u,Ijsª'{AJHyJG@u@$Li@u/c+b1"+\ ТzI jMh܈0o =#]>i:HzmLŖ/n]Ja3C:%KYKu ܇eY tR!\ /4 l~yO4ْwcPv]X/Ȭn _@"%f̫M_P$qG'W"HB=b qܘ5/ 7,7" ?U }FkueS%j֛}Ĥr/+Λ6σ]$T}N l 쒩xF񸃁 $<=G>"w\X&mwVwum(QԠ{A`2擦.٤'ýGK[}, U&چ`Dn"o ھ;4Q5pGa(s |["wZs~|] _ʢ,Evy5e4U*j]~z=+[5-'G#פAEƠ'"ɢyTlcQv m@O82/!1|hQQfs4&9p)$DrNAn7n`iӀڋ|u!G l^脙оYK-nBEIKX?hSs;,@ N5j(\+`-샚q&h-1Q`XGcbyk!9' lzGpK^ JU}KucRv&cr9vk ݂~(0]!ocOr/Q᡾*F|p+hS*~uIo^ g1w2cq*RWϬ2jWv@)gn}c-c "rۚ.,UW^ᒄɵC^nkg U1(3P Qw,%r|NMlKދ-5w?4"lw"@:^_#2ʨLza$[jf^fơtNO,- gqW#(<if ER{sI< h >dDA*jegAu*ƒUq/N+RײpIp #A6(5~1NG?u|oڄ5AStWms ـ| ^{gwT\ԇXҘ/xƔ>܍@nLzQ@"8Y,9^i[ s { -vZ7I}RiN Cy;뱱1r#%L)d]8F#!oy>N*-xNmQRPe{`h48\jm@ߝ1IJ繼K30${l#X==\(Txbj-j bpv+b7fkQMZs˾ Ud Lv0'76N"u׬+ZZVTy߮B ʪ,N>V?ᑣy2/H,h¡%L7oX_\$cɊ/sw8d2@+Y4}W_83WR{:$ x?Àbbh B=*J"lV$eB$|55A$KP G":' 4|=xReKmdsH#X,22|'Y'#t2̲ @peFT^ M-(Dk\B^8no{uwrtM 4*)5$o!5Ґs`x'0poy#KM$j-mO_fU6é~?6x޴(׻H0^ ܃>BmWym<zD8_9 ^3h&|LIΦ Nٸv_tXv'$ U_gN!21س䴠3`s2̕B5mŤvw%.(`J=" l,nFi7x7!2fh+񛽈.kJd qn@tjnL zv&*sX 8;NE Շa]+q>H[?FPΈ.'@`݂cdx> ty 0"Q(F9;)ibUa#%fL&_" yLp- )XGSmڙÊ0<=ֿ" %J)(fdZȵ ib>9tY}**Bx͆UJ7k+ 16z%xqD'/y˭6 P'ཡ`HBc3!BkArV\lt't4%k0C ZY0֟UQ(CT6'P]1Ҁcb.^P8cz`N8 c|x*:7v~a`f7]cE*eY* H ݷo9Jv1̰1*>6#8f`LRFjSr`?Clq23R:,Տ?*g}`'=+vڍ9?)-$INGQtNt\h9X҂F>V;enrXܿ4w[ٴQcFUR@sȍ[7qfDn~|dcSd 6f:Y BжhhmN-1jx۶o{]M{3e+9OAҨR?FY>}Cg3|e'j8%>hNv*Q&e n[:2@(_cXBݚs|s<*AeD~|❭J!׌Ҹ*u$濤B.JTYH.5SAg:H#yt64m*dvWCiHi"4|POb@uo")t_Ię;CB'\kƗ۾stGi{YK ,b9XDZcZbxd-i/˯fѝd 
n1{^׹-%tQ%tֺC̥o\ϊY,59Pnã$-O[ʒG%4nȐ XVgsۉ|UW.D׿>kDN {۫6ߊ듨 AŸbT>\q`+ϖ}nWGfn j/-p-9hh(=([lk"rQ@`1YJȬҀl`x醠'acZZ$4Öt_ <`">][*uC3uwf<êK:no^Up8eibF7iɯiK,t^/7nOφ*hn+.d(kWLP5oN3ΈbXkiZg.#1X&Ȭ3w9O ؤ;j0԰6 PγTIzP"⠺*&xV`9=u1N5C1g6~ |Mǎ=~& GA7yN%N>wgc3M}85D?Whx¡q'xoD-.NAT?ˤzVuaUsX{g&c;e.-R%mC!?WG!'ؔ-y?S^+ vhޥEKciJAf(${ Px|gʝ)@DV}]{Fu <>s=B"a9# Lu, @gқy5im1?5:63tM3'8BcL}>bí7Yo{oXPic)Y׫̆Owm'*OE7ͧ$[Iv›jH^I A‰'Q˂ H ?]F` [$l JQ~rt%rzV3[~K+WNyzLha-]\>ԽI9Ǣxj͎FA~?mr ;֕xAVJl wWSm8EjPr/جY4``۫DP36 vVv .3rb.lѕnx < Ĺbxn%|낚aT$b!#ȹ51t-^Po�DHC%AX '" /z:ކ \fޒ-{xtP@g\GJL6^o:g3͉ݒ֧3&a6AwWb̭m3VifJ(` B#- ӱL&T\?ŝc<[Bbω:E@L&m۽9z Z7 TKz;*[?9U m /@ߡ^Wu T hQA2TNZ0jNJ(!o^r[eoL6j[*x:%[,N$BJgnexL hYi:N, n*9ڱx,Pɔ3'+s>Vszw%vĊ 75DoWO7͸G?2ӛ&5"+)S+h?Z)%oHs(TKRtQeLuH[cQeP#&ulWz>vd՗sbD =EF]B%, ih3=>sti 'XiVǻ*!G&B^7`Hv7JL-xm;NdCk/[H q.,d1N>~uИ5Ub==LQH7U V!AvaFxۅp-,T': 8<:p.o+vFA,};n/=tI9yP{ DoU+D1Ud3 qTn4^Z{-)HIX YhUG/^: MT5ep*eYxwi#9<7,=Mdx~w6BAKmQW!l|E7nD'2ṠVgF"Ga&&D&HA, A0w7XK )3"'5u%PS=4^h2Ry'TTko=4p4R_+ MMT~|:4j &r#Q}k$q-5b" ,f#0-6%h͊(.֬wV1{V@Mzٶe vťY? $Wx"^xl6]) 912"De5^jhmT8L;|nMO%ډYds@z}1AW3\$Bi*Z0s D@B&/IQ~KI(.ɪ#<0Q3[ c@U`3WRBFqti˽y~آ+8 yX9]vyNRm>t>MlM@,%ykǮoj3UHUqƁB0QGA40{ pk:=5*[>`hNB S7"R- Nl U(/RS|B𺃈yS;"(iGjVDǀ7\YՉ b#/9O^2JTOYD,C줊k Z2NiJw'VsIC6D]nr5 "޴BmZa2 y5O'tJ+h+Fepxb ōJV֑~]p@'Eo2kd[M'arrgբ|/$ љXRPT{COuˀg3ގrƱaGVfV5 #-":IoLƍWШsK12iUZt&M+ ̂)JEO8fFyVb x:k0Nkb.ܘ4aNh.k ;V[JEԆxbX6Zkt,kRåW{FۅE7'Lg<9Z)@xQO!+Sf_ˇ͜N.9(=K(YY1iGRt{6%0$kJnr1P8<:OؖqN !z$nOpJW-Qk&j`D~V /5f}PxRF!_M۷$Iyq_[1P!U۝]YpmWT:^<8k?d@dl I9~I bAol=,$9 5؞Xvoy'rheAu1BJ`bMN+ж7:LUS~#)q=|]n SzNLxn [: LEUQl eO;GBˣz2BL WTZTa*l-ϵ|WÖA+VCO'AbV"w^\i'Us#W*QwQN}D2u'r@/j"I3.:< (_yueHFc}%GҾ}d6A ]M   Հ5q`p!pB[D"n6ПɵB5R-_`J@R<(Ͱ,Df@R lB[ؚ4Sۡ6yWl#tӦ3 f齁Y1|cy*Mq\W.0o稜G8t>눮If+$EXJD_sj{Ljg4XK5pz댾qG63 UI},7 u\s`FKu+KhD@"ө /*~nnd 'C+"]c&$6?:ˑq`O3*^Q#~/}Y!%gfh링 &Uèj fxV͉&j9x[7 uZۧV1@D b2hrȥm ? 
ԻIP9+9Ci+N>d8C) zAxף mdr[!㌻ѠPt#]r=첷ǭ?N6g#M}[é(N:q½g3z:Ob6BN|XxB| *J{eg} 2DKU:k ZSp61'0'|==-ADEAduMXM|igIbY3oe-A᦭)!M$}̴7ep C4WYB$pĿ㘌yߛQ>~ ߍ|UpQ@YZkernlab/R/0000755000175100001440000000000012560414652012114 5ustar hornikuserskernlab/R/kkmeans.R0000644000175100001440000004651312676464725013717 0ustar hornikusers## kernel kmeans function ## author: alexandros setGeneric("kkmeans",function(x, ...) standardGeneric("kkmeans")) setMethod("kkmeans", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") x <- model.matrix(mt, mf) res <- kkmeans(x, ...) cl[[1]] <- as.name("kkmeans") if(!is.null(na.act)) n.action(res) <- na.action return(res) }) setMethod("kkmeans",signature(x="matrix"),function(x, centers, kernel = "rbfdot", kpar = "automatic", alg ="kkmeans", p = 1, na.action = na.omit, ...) 
{ x <- na.action(x) rown <- rownames(x) x <- as.matrix(x) m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","stringdot")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(kkmeans(as.kernelMatrix(x), centers= centers)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot"||kernel=="stringdot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(length(centers) == 1){ suppressWarnings(vgr<- vgr2 <- split(sample(1:m,m),1:centers)) ncenters <- centers } else { ncenters <- ns <- dim(centers)[1] dota <- rowSums(x*x)/2 dotb <- rowSums(centers*centers)/2 ktmp <- x%*%t(centers) for(i in 1:ns) ktmp[,i]<- ktmp[,i] - dota - rep(dotb[i],m) prts <- max.col(ktmp) vgr <- vgr2 <- lapply(1:ns, function(x) which(x==prts)) } if(is.character(alg)) alg <- match.arg(alg,c("kkmeans","kerninghan", "normcut")) if(alg == "kkmeans") { p <- NULL D <- NULL D1 <- NULL w <- rep(1,m) } if(alg=="kerninghan") { p <- p D <- kernelMult(kernel,x, 
, rep(1,m)) w <- rep(1,m) D1 <- NULL } if(alg=="normcut") { p <- p D1 <- 1 w <- kernelMult(kernel,x, , rep(1,m)) } ## initialize lower bound and distance matrix dismat <- lower <- matrix(0,m,ncenters) ## calculate diagonal kdiag <- rep(1,m) for (i in 1:m) kdiag[i] <- drop(kernel(x[i,],x[i,])) ## initialize center-newcenter distance vector second sum vector secsum <- dc <- rep(1,ncenters) mindis <- rep(0,m) cind <- 1:ncenters for ( i in 1:ncenters) { ## compute second sum eq. 1 secsum[i] <- sum(affinMult(kernel, x[vgr[[i]],,drop=FALSE],,w[vgr[[i]]], p , D, D1) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2 ## calculate initial distance matrix and lower bounds lower[,i] <- dismat[,i] <- - 2 * affinMult(kernel,x,x[vgr[[i]],,drop=FALSE], w[vgr[[i]]], p ,D, D1)/sum(w[vgr[[i]]]) + secsum[i] + kdiag } cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) while(1){ for (z in 1:ncenters) dc[z] <- -2*sum(affinMult(kernel, x[vgr2[[z]],,drop=FALSE], x[vgr[[z]],,drop=FALSE], w[vgr[[z]]], p, D, D1)*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(affinMult(kernel, x[vgr[[z]],,drop=FALSE], ,w[vgr[[z]]], p, D, D1) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(affinMult(kernel, x[vgr2[[z]],,drop=FALSE], ,w[vgr2[[z]]], p, D, D1) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2 ## assign new cluster indexes vgr <- vgr2 if(sum(abs(dc)) < 1e-15) break for (u in 1:ncenters){ ## compare already calulated distances of every poit to intra - center distance to determine if ## it is necesary to compute the distance at this point, we create an index of points to compute distance if(u > 1) compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0 else compin <- rep(TRUE,m) ## compute second sum eq. 
1 secsum[u] <- sum(affinMult(kernel, x[vgr[[u]],,drop=FALSE], ,w[vgr[[u]]], p, D, D1) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2 ## compute distance matrix and lower bounds lower[compin,u] <- dismat[compin,u] <- - 2 * affinMult(kernel,x[compin,],x[vgr[[u]],,drop=FALSE], w[vgr[[u]]], p , D, D1)/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin] } ## calculate new cluster indexes cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) } cluster <- max.col(-dismat) size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))})) cent <- matrix(unlist(lapply(1:ncenters,ll<- function(l){colMeans(x[which(cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:ncenters,ll<- function(l){sum((x[which(cluster==l),] - cent[l,])^2)})) names(cluster) <- rown return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= kernel)) }) ## kernel Matrix interface setMethod("kkmeans",signature(x="kernelMatrix"),function(x, centers, ...) { m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(length(centers) == 1){ suppressWarnings(vgr<- vgr2 <- split(sample(1:m,m),1:centers)) ncenters <- centers } else ncenters <- dim(centers)[1] ## initialize lower bound and distance matrix dismat <- lower <- matrix(0,m,ncenters) ## diagonal kdiag <- diag(x) ## weigths (should be adapted for future versions !!) w <- rep(1,m) ## initialize center-newcenter distance vector second sum vector secsum <- dc <- rep(1,ncenters) mindis <- rep(0,m) cind <- 1:ncenters for ( i in 1:ncenters) { ## compute second sum eq. 
1 secsum[i] <- sum(drop(crossprod(x[vgr[[i]],vgr[[i]],drop=FALSE],w[vgr[[i]]])) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2 ## calculate initial distance matrix and lower bounds lower[,i] <- dismat[,i] <- - 2 * x[,vgr[[i]],drop=FALSE]%*%w[vgr[[i]]]/sum(w[vgr[[i]]]) + secsum[i] + kdiag } cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) while(1){ for (z in 1:ncenters) dc[z] <- -2*sum((x[vgr2[[z]],vgr[[z]],drop=FALSE] %*% w[vgr[[z]]])*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(drop(crossprod(x[vgr[[z]],vgr[[z]],drop=FALSE],w[vgr[[z]]])) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(drop(crossprod(x[vgr2[[z]],vgr2[[z]],drop=FALSE],w[vgr2[[z]]])) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2 ## assign new cluster indexes vgr <- vgr2 if(sum(abs(dc))<1e-15) break for (u in 1:ncenters){ ## compare already calulated distances of every point to intra - center distance to determine if ## it is necesary to compute the distance at this point, we create an index of points to compute distance if(u > 1) compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0 else compin <- rep(TRUE,m) ## compute second sum eq. 
1 secsum[u] <- sum(drop(crossprod(x[vgr[[u]],vgr[[u]],drop=FALSE],w[vgr[[u]]])) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2 ## compute distance matrix and lower bounds lower[compin,u] <- dismat[compin,u] <- - 2 * (x[which(compin),vgr[[u]],drop=FALSE] %*% w[vgr[[u]]])/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin] } ## calculate new cluster indexes cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) } cluster <- max.col(-dismat) size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))})) cent <- matrix(unlist(lapply(1:ncenters,ll<- function(l){colMeans(x[which(cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:ncenters,ll<- function(l){sum((x[which(cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= "Kernel matrix used")) }) ## List interface setMethod("kkmeans",signature(x="list"),function(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), alg ="kkmeans", p = 1, na.action = na.omit, ...) 
{ x <- na.action(x) m <- length(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(length(centers) == 1){ suppressWarnings(vgr<- vgr2 <- split(sample(1:m,m),1:centers)) ncenters <- centers } else ncenters <- dim(centers)[1] if(is.character(alg)) alg <- match.arg(alg,c("kkmeans","kerninghan", "normcut")) if(alg == "kkmeans") { p <- NULL D <- NULL D1 <- NULL w <- rep(1,m) } if(alg=="kerninghan") { p <- p D <- kernelMult(kernel,x, , rep(1,m)) w <- rep(1,m) D1 <- NULL } if(alg=="normcut") { p <- p D1 <- 1 w <- kernelMult(kernel,x, , rep(1,m)) } ## initialize lower bound and distance matrix dismat <- lower <- matrix(0,m,ncenters) ## calculate diagonal kdiag <- rep(1,m) for (i in 1:m) kdiag[i] <- drop(kernel(x[[i]],x[[i]])) ## initialize center-newcenter distance vector second sum vector secsum <- dc <- rep(1,ncenters) mindis <- rep(0,m) cind <- 1:ncenters for ( i in 1:ncenters) { ## compute second sum eq. 
1 secsum[i] <- sum(affinMult(kernel, x[vgr[[i]]],,w[vgr[[i]]], p , D, D1) * w[vgr[[i]]])/sum(w[vgr[[i]]])^2 ## calculate initial distance matrix and lower bounds lower[,i] <- dismat[,i] <- - 2 * affinMult(kernel,x,x[vgr[[i]]], w[vgr[[i]]], p ,D, D1)/sum(w[vgr[[i]]]) + secsum[i] + kdiag } cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) while(1){ for (z in 1:ncenters) dc[z] <- -2*sum(affinMult(kernel, x[vgr2[[z]]], x[vgr[[z]]], w[vgr[[z]]], p, D, D1)*w[vgr2[[z]]])/(sum(w[vgr[[z]]])*sum(w[vgr2[[z]]])) + sum(affinMult(kernel, x[vgr[[z]]], ,w[vgr[[z]]], p, D, D1) * w[vgr[[z]]]) / sum(w[vgr[[z]]])^2 + sum(affinMult(kernel, x[vgr2[[z]]], ,w[vgr2[[z]]], p, D, D1) * w[vgr2[[z]]]) / sum(w[vgr2[[z]]])^2 ## assign new cluster indexes vgr <- vgr2 if(sum(abs(dc))<1e-15) break for (u in 1:ncenters){ ## compare already calulated distances of every poit to intra - center distance to determine if ## it is necesary to compute the distance at this point, we create an index of points to compute distance if(u > 1) compin <- apply(t(t(dismat[,1:(u-1)]) < dismat[,u] - dc[u]),1,sum)==0 else compin <- rep(TRUE,m) ## compute second sum eq. 
1 secsum[u] <- sum(affinMult(kernel, x[vgr[[u]]], ,w[vgr[[u]]], p, D, D1) * w[vgr[[u]]])/sum(w[vgr[[u]]])^2 ## compute distance matrix and lower bounds lower[compin,u] <- dismat[compin,u] <- - 2 * affinMult(kernel,x[compin,],x[vgr[[u]]], w[vgr[[u]]], p , D, D1)/sum(w[vgr[[u]]]) + secsum[u] + kdiag[compin] } ## calculate new cluster indexes cluserm <- max.col(-dismat) for(i in 1:ncenters) vgr2[[i]] <- which(cluserm==i) } cluster <- max.col(-dismat) size <- unlist(lapply(1:ncenters, ll <- function(l){length(which(cluster==l))})) cent <- matrix(unlist(lapply(1:ncenters,ll<- function(l){colMeans(x[which(cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:ncenters,ll<- function(l){sum((x[which(cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=cluster, size = size, centers=cent, withinss=withss, kernelf= kernel)) }) setGeneric("affinMult",function(kernel, x, y = NULL, z, p, D, D1, blocksize = 256) standardGeneric("affinMult")) affinMult.rbfkernel <- function(kernel, x, y=NULL, z, p, D, D1,blocksize = 256) { if(is.null(p)&is.null(D)&is.null(D1)) res <- kernelMult(kernel,x,y,z) else{ if(!is.matrix(y)&&!is.null(y)) stop("y must be a matrix") if(!is.matrix(z)&&!is.vector(z)) stop("z must be a matrix or a vector") sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y) & is.null(D1)) { if(is.vector(z)) { if(!length(z) == n) stop("vector z length must be equal to x rows") z <- matrix(z,n,1) } if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n))))%*%z - z[lowerl:upperl,]*(1-p) lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - 
rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n))))%*%z- z[lowerl:upperl,]*(1-p) } if(is.matrix(y) & is.null(D1)) { n2 <- dim(y)[1] if(is.vector(z)) { if(!length(z) == n2) stop("vector z length must be equal to y rows") z <- matrix(z,n2,1) } if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize if(upperl < n2) res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2))))%*%z-z[lowerl:upperl,]*(1-p) - z[lowerl:upperl,]*D[lowerl:upperl] if(upperl >n2 & lowerl n2 & n>=n2){ res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z res[lowerl:n2,] <- res[lowerl:n2,] - z[lowerl:n2,]*(1-p) - z[lowerl:n2,]*D[lowerl:n2] } else res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z } } if (is.null(y) & !is.null(D1)) { if(is.vector(z)) { if(!length(z) == n) stop("vector z length must be equal to x rows") z <- matrix(z,n,1) } if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize tmp <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)))) D1 <- 1/colSums(tmp) res[lowerl:upperl,] <- D1*tmp%*%diag(D1)%*%z - z[lowerl:upperl,]*(1-D1) lowerl <- upperl + 1 } } if(lowerl <= n){ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)))) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z- z[lowerl:upperl,]*(1-D1) } } if(is.matrix(y) &!is.null(D1)) { n2 <- dim(y)[1] if(is.vector(z)) { if(!length(z) == n2) stop("vector z length must be equal to y rows") z <- matrix(z,n2,1) } 
if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) ones <- rep(1,blocksize) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize if(upperl < n2) tmp <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:upperl,] <- D1*tmp%*%diag(D1)%*%z-z[lowerl:upperl,]*(1-D1) if(upperl >n2 & lowerl n2 & n>=n2){ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(y) -rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z res[lowerl:n2,] <- res[lowerl:n2,] - z[lowerl:n2,]*(1-D1) } else{ tmp <- exp(sigma*(2*x[lowerl:n,]%*%t(y) -rep.int(1,n+1-lowerl)%*%t(dotb) -dota[lowerl:n]%*%t(rep.int(1,n2)))) D1 <- 1/colSums(tmp) res[lowerl:n,] <- D1*tmp%*%diag(D1)%*%z } } } } return(res) } setMethod("affinMult",signature(kernel="kernel", x="matrix"),affinMult.rbfkernel) kernlab/R/gausspr.R0000644000175100001440000003567512676464637013763 0ustar hornikusers## Gaussian Processes implementation. Laplace approximation for classification. ## author : alexandros karatzoglou setGeneric("gausspr", function(x, ...) standardGeneric("gausspr")) setMethod("gausspr",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... 
<- NULL m$formula <- m$x m$x <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- gausspr(x, y, scaled = scaled, ...) kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("gausspr",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- gausspr(x, ...) ret }) setMethod("gausspr",signature(x="matrix"), function (x, y, scaled = TRUE, type = NULL, kernel = "rbfdot", kpar = "automatic", var = 1, variance.model = FALSE, tol = 0.0005, cross = 0, fit = TRUE, ... ,subset ,na.action = na.omit) { ## should become an option reduced <- FALSE ## subsetting and na-handling for matrices ret <- new("gausspr") if (!missing(subset)) x <- x[subset,] if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. 
Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] if (is.numeric(y)&&(type(ret)!="classification")) { y <- scale(y) y.scale <- attributes(y)[c("scaled:center","scaled:scale")] y <- as.vector(y) } tmpsc <- list(scaled = scaled, x.scale = x.scale, y.scale = y.scale) } } if (var < 10^-3) stop("Noise variance parameter var has to be greater than 10^-3") # in case of classification: transform factors into integers if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot")) if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") p <- 0 if (type(ret) == "classification") { indexes <- lapply(1:nclass(ret), function(kk) which(y == kk)) for (i 
in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) xd <- matrix(0,(li+lj),dim(x)[2]) xdi <- 1:(li+lj) <= li xd[xdi,rep(TRUE,dim(x)[2])] <- x[indexes[[i]],] xd[xdi == FALSE,rep(TRUE,dim(x)[2])] <- x[indexes[[j]],] if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) yd <- c(rep(1,li),rep(-1,lj)) else yd <- c(rep(-1,li),rep(1,lj)) if(reduced == FALSE){ K <- kernelMatrix(kernel,xd) gradnorm <- 1 alphag <- solut <- rep(0,li+lj) while (gradnorm > tol) { f <- crossprod(K,alphag) grad <- -yd/(1 + exp(yd*f)) hess <- exp(yd*f) hess <- hess / ((1 + hess)^2) ## We use solveiter instead of solve to speed up things ## A <- t(t(K)*as.vector(hess)) ## diag(A) <- diag(A) + 1 ## alphag <- alphag - solve(A,(grad + alphag)) solut <- solveiter(K, hess, (grad + alphag), solut) alphag <- alphag - solut gradnorm <- sqrt(sum((grad + alphag)^2)) } } else if (reduced ==TRUE) { yind <- t(matrix(unique(yd),2,length(yd))) ymat <- matrix(0, length(yd), 2) ymat[yind==yd] <- 1 ##Z <- csi(xd, ymat, kernel = kernel, rank = dim(yd)[1]) ##Z <- Z[sort(pivots(Z),index.return = TRUE)$ix, ,drop=FALSE] Z <- inchol(xd, kernel = kernel) gradnorm <- 1 alphag <- rep(0,li+lj) m1 <- dim(Z)[1] n1 <- dim(Z)[2] Ksub <- diag(rep(1,n1)) while (gradnorm > tol) { f <- drop(Z%*%crossprod(Z,alphag)) f[which(f>20)] <- 20 grad <- -yd/(1 + exp(yd*f)) hess <- exp(yd*f) hess <- as.vector(hess / ((1 + hess)^2)) alphag <- alphag - (- Z %*%solve(Ksub + (t(Z)*hess)%*%Z) %*% (t(Z)*hess))%*%(grad + alphag) + (grad + alphag) gradnorm <- sqrt(sum((grad + alphag)^2)) } } alpha(ret)[[p]] <- alphag alphaindex(ret)[[p]] <- c(indexes[[i]],indexes[[j]]) } } } if (type(ret) == "regression") { K <- kernelMatrix(kernel,x) if(variance.model) { sol <- solve(K + diag(rep(var, length = m))) rm(K) alpha(ret) <- sol%*%y } else alpha(ret) <- solve(K + diag(rep(var, length = m))) %*% y } kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x 
## store the full inverse factor only when predictive variances were requested
if(variance.model)
  sol(ret) <- sol

## in-sample fitted values (NA when fit = FALSE)
fitted(ret)  <- if (fit) predict(ret, x) else NA

if (fit){
  ## training error: misclassification rate / mean squared error
  if(type(ret)=="classification")
    error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret))))
  if(type(ret)=="regression"){
    ## undo the response scaling so the error is on the original scale
    if (!is.null(scaling(ret)$y.scale))
      fitted(ret) <- fitted(ret) * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
    error(ret) <- drop(crossprod(fitted(ret) - y)/m)
  }
}

if(any(scaled))
  scaling(ret) <- tmpsc

cross(ret) <- -1

## k-fold cross-validation: refit on k-1 folds, evaluate on the held-out fold
if(cross == 1)
  cat("\n","cross should be >1 no cross-validation done!","\n","\n")
else if (cross > 1)
  {
    cerror <- 0
    suppressWarnings(vgr<-split(sample(1:m,m),1:cross))
    for(i in 1:cross)
      {
        cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length)))))
        if(type(ret)=="classification")
          {
            cret <- gausspr(x[cind,], y[cind], scaled = FALSE, type=type(ret),kernel=kernel,var = var, cross = 0, fit = FALSE)
            cres <- predict(cret, x[vgr[[i]],])
            cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror
          }
        if(type(ret)=="regression")
          {
            cret <- gausspr(x[cind,],y[cind],type=type(ret),scaled = FALSE, kernel=kernel,var = var,tol=tol, cross = 0, fit = FALSE)
            cres <- predict(cret, x[vgr[[i]],])
            ## NOTE(review): `scal` is only assigned when y was scaled, so it is
            ## undefined (error) when scaled = FALSE; the fold error is divided
            ## by m (the full sample size) and never by `cross`, unlike the
            ## classification branch above -- looks buggy, confirm before
            ## relying on cross(ret) for regression models.
            if (!is.null(scaling(ret)$y.scale))
              scal <- scaling(ret)$y.scale$"scaled:scale"
            cerror <- drop((scal^2)*crossprod(cres - y[vgr[[i]]])/m) + cerror
          }
      }
    cross(ret) <- cerror
  }

return(ret)
})

## Predict from a fitted "gausspr" model.
## type = "response"      : class labels / regression predictions
##        "probabilities" : class membership probabilities (classification)
##        "votes"         : pairwise one-vs-one vote matrix (classification)
##        "variance", "sdeviation" : predictive variance / standard deviation
##                          (regression fitted with variance.model = TRUE)
## coupler : method used to couple pairwise class probabilities (see couple())
setMethod("predict", signature(object = "gausspr"),
function (object, newdata, type = "response", coupler = "minpair")
{
  sc <- 0   # flag: 1 when predicting on the (already scaled) training data
  type <- match.arg(type,c("response","probabilities","votes", "variance", "sdeviation"))
  if (missing(newdata) && type!="response")
    return(fitted(object))
  else if(missing(newdata))
    {
      newdata <- xmatrix(object)
      sc <- 1
    }

  ncols <- ncol(xmatrix(object))
  nrows <- nrow(xmatrix(object))
  oldco <- ncols

  ## rebuild the model matrix when the model was fitted via the formula interface
  ## NOTE(review): `na.action` here resolves to the base function, not a local
  ## setting or the value stored in the object -- confirm this is intended.
  if (!is.null(terms(object)))
    {
      newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action)
    }
  else
    newdata  <- if (is.vector (newdata)) t(t(newdata)) else
as.matrix(newdata)

  newnrows <- nrow(newdata)
  newncols <- ncol(newdata)
  newco <- newncols
  if (oldco != newco) stop ("test vector does not match model !")

  ## apply the training-set centering/scaling to the new data
  ## (skipped when predicting on the already-scaled training data, sc == 1)
  if (is.list(scaling(object)) && sc != 1)
    newdata[,scaling(object)$scaled] <-
      scale(newdata[,scaling(object)$scaled, drop = FALSE],
            center = scaling(object)$x.scale$"scaled:center",
            scale  = scaling(object)$x.scale$"scaled:scale")

  p <- 0
  if(type == "response")
    {
      if(type(object)=="classification")
        {
          ## one-against-one voting over all class pairs
          predres <- 1:newnrows
          ## FIX: the vote matrix needs one column per *test* point.  It was
          ## allocated with `nrows` (number of training points) columns, which
          ## silently recycles or errors whenever nrow(newdata) != nrows.
          votematrix <- matrix(0,nclass(object),newnrows)
          for(i in 1:(nclass(object)-1))
            {
              jj <- i+1
              for(j in jj:nclass(object))
                {
                  p <- p+1
                  ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[p]],],alpha(object)[[p]])
                  votematrix[i,ret>0] <- votematrix[i,ret>0] + 1
                  votematrix[j,ret<0] <- votematrix[j,ret<0] + 1
                }
            }
          predres <- sapply(predres, function(x) which.max(votematrix[,x]))
        }
    }

  if(type == "probabilities")
    {
      if(type(object)=="classification")
        {
          ## pairwise class probabilities through the logistic link, then
          ## coupled into a single multi-class probability matrix
          binprob <- matrix(0, newnrows, nclass(object)*(nclass(object) - 1)/2)
          for(i in 1:(nclass(object)-1))
            {
              jj <- i+1
              for(j in jj:nclass(object))
                {
                  p <- p+1
                  binprob[,p] <- 1/(1+exp(-kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[p]],],alpha(object)[[p]])))
                }
            }
          multiprob <- couple(binprob, coupler = coupler)
        }
    }

  if(type(object) == "regression")
    {
      if (type == "variance"||type == "sdeviation")
        {
          ## predictive variance: diag( k(x*,x*) - k(x*,X) sol k(X,x*) )
          Ktest <- kernelMatrix(kernelf(object),xmatrix(object), newdata)
          predres <- diag(kernelMatrix(kernelf(object),newdata) - t(Ktest) %*% sol(object) %*% Ktest)
          if (type== "sdeviation")
            predres <- sqrt(predres)
          ## FIX: undo the response scaling correctly.  A variance scales with
          ## the square of the scale factor and a standard deviation with the
          ## scale factor itself; neither is shifted by the centering constant.
          ## (Previously `* scale + center` was applied to both, giving wrong
          ## values whenever y had been scaled during training.)
          if (!is.null(scaling(object)$y.scale))
            {
              ysc <- scaling(object)$y.scale$"scaled:scale"
              if (type == "variance")
                predres <- predres * ysc^2
              else
                predres <- predres * ysc
            }
        }
      else
        {
          predres <- kernelMult(kernelf(object),newdata,xmatrix(object),as.matrix(alpha(object)))
          ## map predictions back to the original response scale
          if (!is.null(scaling(object)$y.scale))
            predres <- predres * scaling(object)$y.scale$"scaled:scale" +
scaling(object)$y.scale$"scaled:center"
        }
    }

  if (is.character(lev(object)))
    {
      ## classification & probabilities : return the coupled probability matrix
      if(type == "probabilities")
        {
          colnames(multiprob) <- lev(object)
          return(multiprob)
        }
      ## classification & type response : return a factor with the original levels
      if(type == "response")
        return(factor (lev(object)[predres], levels = lev(object)))
      ## classification & votes : return the raw pairwise vote matrix
      if(type == "votes")
        return(votematrix)
    }
  else
    ## regression (or integer class labels): return the raw predicted values
    return(predres)
})

## Compact console display for "gausspr" objects: problem type, kernel,
## training-set size and (when available) training / cross-validation error.
setMethod("show","gausspr",
function(object){
  cat("Gaussian Processes object of class \"gausspr\"","\n")
  cat(paste("Problem type:", type(object),"\n"))
  cat("\n")
  show(kernelf(object))
  cat(paste("\nNumber of training instances learned :", dim(xmatrix(object))[1],"\n"))
  if(!is.null(fitted(object)))
    cat(paste("Train error :", round(error(object),9),"\n"))
  ## train error & loss
  if(cross(object)!=-1)
    cat("Cross validation error :",round(cross(object),9),"\n")
})

## Preconditioned biconjugate gradient solver for the linear system A x = b
## with A = I + N*B, where N = diag(noiseproc).  Simplified form of the
## Numerical Recipes routine `linbcg`; the preconditioner is inv(diag(A)).
##   B         : kernel matrix
##   noiseproc : per-observation noise terms (the N above)
##   b         : right-hand side
##   x         : initial guess, iteratively refined and returned
##   itmax     : maximum number of iterations
##   tol       : convergence tolerance (iterates while mean(abs(A x - b)) > tol)
solveiter <- function(B,noiseproc,b,x,itmax = 50,tol = 10e-4 ,verbose = FALSE) {

  ## diagonal of A, used as the (Jacobi) preconditioner
  diagA <- matrix(1,dim(B)[1],1) + colSums(B)+ diag(B)*(noiseproc-1)

  cont <- 0
  iter <- 0
  r <- .Amul2(x,B,noiseproc)   # r = A x
  r <- b - r                   # initial residual
  rr <- r                      # shadow residual of the biconjugate iteration
  znrm <- 1
  bnrm <- sqrt(sum((b)^2))
  z <- r/diagA                 # preconditioned residual
  err <- sqrt(sum((.Amul2(x,B,noiseproc) - b)^2))/bnrm

  while (iter <= itmax){
    iter <- iter + 1
    zm1nrm <- znrm
    zz <- rr/diagA
    bknum<- drop(crossprod(z,rr))
    if (iter == 1)
      {
        p <- z
        pp <- zz
      }
    else
      {
        ## update the two search directions with the BiCG beta coefficient
        bk <- bknum/bkden
        p <- bk*p + z
        pp <- bk*pp + zz
      }
    bkden <- bknum
    z <- .Amul2(p,B,noiseproc)
    akden <- drop(crossprod(z,pp))
    ak <- bknum/akden            # step length along the search direction
    zz <- .Amul2T(pp,B,noiseproc)
    x <-
x + ak*p r <- r - ak*z rr <- rr - ak*zz z <- r/diagA znrm <- 1 err <- mean(abs(r)) if (err tol && counter < maxiter ) { ## Aggressively allocate memory if(counter %% BLOCKSIZE == 0) { Tktmp <- matrix(0, m, dim(Tk)[2] + BLOCKSIZE) Tktmp[1:m > 0, 1:(dim(Tk)[2] + BLOCKSIZE) <= dim(Tk)[2]] <- Tk Tk <- Tktmp Ttmp <- matrix(0, dim(T)[1]+BLOCKSIZE, BLOCKSIZE+counter) ind <- 1:(dim(T)[1]+BLOCKSIZE) <= dim(T)[1] ind2 <- 1:(BLOCKSIZE + counter) <= counter Ttmp[ind , ind2] <- T Ttmp[ind == FALSE, ind2 == FALSE] <- diag(1, BLOCKSIZE) T <- Ttmp padded.veck.tmp <- matrix(0,dim(padded.veck)[1]+BLOCKSIZE) padded.veck.tmp[1:(dim(padded.veck)[1]+BLOCKSIZE) <= dim(padded.veck)[1]] <- padded.veck padded.veck <- padded.veck.tmp pivots.tmp <- matrix(0, dim(pivots)[1]+BLOCKSIZE) pivots.tmp[1:(dim(pivots)[1] + BLOCKSIZE)<= dim(pivots)[1]] <- pivots pivots <- pivots.tmp maxresiduals.tmp <- matrix(0,dim(maxresiduals)[1]+BLOCKSIZE) maxresiduals.tmp[1:(dim(maxresiduals)[1]+BLOCKSIZE) <= dim(maxresiduals)[1]] <- maxresiduals maxresiduals <- maxresiduals.tmp if(counter == 0) t <- rep(0,BLOCKSIZE) else t <- rep(0,length(t)+BLOCKSIZE) } veck <- kernelFast(kernel, x, x[index, ,drop=FALSE],dota) if (counter == 0) { ## No need to compute t here tau <- sqrt(veck[index]) ## Update T T[1, 1] <- tau ## Compute the update for Tk update <- veck/tau } else { padded.veck[1:counter] <- veck[pivots[1:counter]] ## First compute t ## t <- t(crossprod(padded.veck,backsolve(T,diag(1,nrow=dim(T)[1])))) ## cat("T: ",dim(T), " p:",length(padded.veck),",\n") t[1:counter] <- backsolve(T, k=counter, padded.veck, transpose = TRUE) ## Now compute tau tau <- as.vector(sqrt(veck[index] - crossprod(t))) ## Update T T[1:counter, counter+1] <- t[1:counter] T[counter + 1, counter + 1] <- tau ## Compute the update for Tk update <- (1/tau) * (veck - Tk %*% t) } ## Update Tk Tk[,counter + 1] <- update ## Update diagonal residuals diag.residues <- diag.residues - update^2 ## Update pivots pivots[counter + 1] <- index ## Monitor 
residuals maxresiduals[counter + 1] <- residue ## Choose next candidate residue <- max( diag.residues ) index <- which.max(diag.residues) ## Update counter counter <- counter + 1 ## Report progress to the user if(counter%%blocksize == 0 && (verbose == TRUE)) cat("counter = ",counter," ", "residue = ", residue, "\n") } ## Throw away extra columns which we might have added Tk <- Tk[, 1:counter] pivots <- pivots[1:counter] maxresiduals <- maxresiduals[1:counter] return(new("inchol",.Data=Tk,pivots=pivots,diagresidues = diag.residues, maxresiduals = maxresiduals)) }) kernlab/R/csi.R0000644000175100001440000003653711304023134013015 0ustar hornikusers## 15.09.2005 alexandros setGeneric("csi", function(x, y, kernel="rbfdot",kpar=list(sigma=0.1), rank, centering = TRUE, kappa =0.99 ,delta = 40 ,tol = 1e-4) standardGeneric("csi")) setMethod("csi",signature(x="matrix"), function(x, y, kernel="rbfdot",kpar=list(sigma=0.1), rank, centering = TRUE, kappa =0.99 ,delta = 40 ,tol = 1e-5) { ## G,P,Q,R,error1,error2,error,predicted.gain,true.gain ## INPUT ## x : data ## y : target vector n x d ## m : maximal rank ## kappa : trade-off between approximation of K and prediction of y (suggested: .99) ## centering : 1 if centering, 0 otherwise (suggested: 1) ## delta : number of columns of cholesky performed in advance (suggested: 40) ## tol : minimum gain at iteration (suggested: 1e-4) ## OUTPUT ## G : Cholesky decomposition -> K(P,P) is approximated by G*G' ## P : permutation matrix ## Q,R : QR decomposition of G (or center(G) if centering) ## error1 : tr(K-G*G')/tr(K) at each step of the decomposition ## error2 : ||y-Q*Q'*y||.F^2 / ||y||.F^2 at each step of the decomposition ## predicted.gain : predicted gain before adding each column ## true.gain : actual gain after adding each column n <- dim(x)[1] d <- dim(y)[2] if(n != dim(y)[1]) stop("Labels y and data x dont match") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- 
do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") m <- rank ## make sure rank is smaller than n m <- min(n-2,m) G <- matrix(0,n,min(m+delta,n)) ## Cholesky factor diagK <- rep(drop(kernel(x[1,],x[1,])),n) P <- 1:n ## pivots Q <- matrix(0,n,min(m+delta,n)) ## Q part of the QR decomposition R <- matrix(0,min(m+delta,n),min(m+delta,n)) ## R part of the QR decomposition traceK <- sum(diagK) lambda <- (1-kappa)/traceK if (centering) y <- y - (1/n) * t(matrix(colSums(y),d,n)) sumy2 <- sum(y^2) mu <- kappa/sumy2 error1 <- traceK error2 <- sumy2 predictedgain <- truegain <- rep(0,min(m+delta,n)) k <- 0 # current index of the Cholesky decomposition kadv <- 0 # current index of the look ahead steps Dadv <- diagK D <- diagK ## makes sure that delta is smaller than n - 2 delta <- min(delta,n - 2) ## approximation cost cached quantities A1 <- matrix(0,n,1) A2 <- matrix(0,n,1) A3 <- matrix(0,n,1) GTG <- matrix(0,m+delta,m+delta) QTy <- matrix(0,m+delta,d) QTyyTQ <- matrix(0,m+delta,m+delta) ## first performs delta steps of Cholesky and QR decomposition if(delta > 0) for (i in 1:delta) { kadv <- kadv + 1 ## select best index diagmax <- Dadv[kadv] jast <- 1 for (j in 1:(n-kadv+1)) { if (Dadv[j+kadv-1] > diagmax/0.99){ diagmax <- Dadv[j+kadv-1] jast <- j } } if (diagmax < 1e-12){ kadv <- kadv - 1 ## all pivots are too close to zero, stops break ## this can only happen if the matrix has rank less than delta } else{ jast <- jast + kadv-1 ## permute indices P[c(kadv,jast)] <- P[c(jast,kadv)] Dadv[c(kadv, jast)] <- Dadv[c(jast, kadv)] D[c(kadv, jast)] <- D[c(jast, kadv)] A1[c(kadv, jast)] <- A1[c(jast, kadv)] G[c(kadv, jast),1:kadv-1] <- G[c(jast,kadv),1:kadv-1] Q[c(kadv, jast),1:kadv-1] <- Q[c(jast, kadv),1:kadv-1] ## compute new Cholesky column G[kadv,kadv] <- Dadv[kadv] G[kadv,kadv] <- sqrt(G[kadv,kadv]) newKcol <- kernelMatrix(kernel, x[P[(kadv+1):n],,drop = FALSE],x[P[kadv],,drop=FALSE]) G[(kadv+1):n,kadv]<- (1/G[kadv,kadv])*(newKcol 
- G[(kadv+1):n,1:kadv-1,drop=FALSE] %*% t(G[kadv,1:kadv-1,drop=FALSE])) ## update diagonal Dadv[(kadv+1):n] <- Dadv[(kadv+1):n] - G[(kadv+1):n,kadv]^2 Dadv[kadv] <- 0 ## performs QR if (centering) Gcol <- G[,kadv,drop=FALSE] - (1/n) * matrix(sum(G[,kadv]),n,1) else Gcol <- G[,kadv, drop=FALSE] R[1:kadv-1,kadv] <- crossprod(Q[,1:kadv-1, drop=FALSE], Gcol) Q[,kadv] <- Gcol - Q[,1:kadv-1,drop=FALSE] %*% R[1:kadv-1,kadv,drop=FALSE] R[kadv,kadv] <- sqrt(sum(Q[,kadv]^2)) Q[,kadv] <- Q[,kadv]/drop(R[kadv,kadv]) ## update cached quantities if (centering) GTG[1:kadv,kadv] <- crossprod(G[,1:kadv], G[,kadv]) else GTG[1:kadv,kadv] <- crossprod(R[1:kadv,1:kadv], R[1:kadv,kadv]) GTG[kadv,1:kadv] <- t(GTG[1:kadv,kadv]) QTy[kadv,] <- crossprod(Q[,kadv], y[P,,drop = FALSE]) QTyyTQ[kadv,1:kadv] <- QTy[kadv,,drop=FALSE] %*% t(QTy[1:kadv,,drop=FALSE]) QTyyTQ[1:kadv,kadv] <- t(QTyyTQ[kadv,1:kadv]) ## update costs A1[kadv:n] <- A1[kadv:n] + GTG[kadv,kadv] * G[kadv:n,kadv]^2 A1[kadv:n] <- A1[kadv:n] + 2 * G[kadv:n,kadv] *(G[kadv:n,1:kadv-1] %*% GTG[1:kadv-1,kadv,drop=FALSE]) } } ## compute remaining costs for all indices A2 <- rowSums(( G[,1:kadv,drop=FALSE] %*% crossprod(R[1:kadv,1:kadv], QTy[1:kadv,,drop=FALSE]))^2) A3 <- rowSums((G[,1:kadv,drop=FALSE] %*% t(R[1:kadv,1:kadv]))^2) ## start main loop while (k < m){ k <- k +1 ## compute the gains in approximation for all remaining indices dJK <- matrix(0,(n-k+1),1) for (i in 1:(n-k+1)) { kast <- k+i-1 if (D[kast] < 1e-12) dJK[i] <- -1e100 ## this column is already generated by already ## selected columns -> cannot be selected else { dJK[i] <- A1[kast] if (kast > kadv) ## add eta dJK[i] <- dJK[i] + D[kast]^2 - (D[kast] - Dadv[kast])^2 dJK[i] <- dJK[i] / D[kast] } } dJy <- matrix(0,n-k+1,1) if (kadv > k){ for (i in 1:(n-k+1)) { kast <- k+i-1 if (A3[kast] < 1e-12) dJy[i] <- 0 else dJy[i] <- A2[kast] / A3[kast] } } ## select the best column dJ <- lambda * dJK + mu * dJy diagmax <- -1 jast <- 0 for (j in 1:(n-k+1)) { if (D[j+k-1] > 1e-12) if 
(dJ[j] > diagmax/0.9){ diagmax <- dJ[j] jast <- j } } if (jast==0) { ## no more good indices, exit k <- k-1 break } jast <- jast + k - 1 predictedgain[k] <- diagmax ## performs one cholesky + QR step: ## if new pivot not already selected, use pivot ## otherwise, select new look ahead index that maximize Dadv if (jast > kadv){ newpivot <- jast jast <- kadv + 1 } else{ a <- 1e-12 b <- 0 for (j in 1:(n-kadv)) { if (Dadv[j+kadv] > a/0.99){ a <- Dadv[j+kadv] b <- j+kadv } } if (b==0) newpivot <- 0 else newpivot <- b } if (newpivot > 0){ ## performs steps kadv <- kadv + 1 ## permute P[c(kadv, newpivot)] <- P[c(newpivot, kadv)] Dadv[c(kadv, newpivot)] <- Dadv[c(newpivot, kadv)] D[c(kadv, newpivot)] <- D[c(newpivot, kadv)] A1[c(kadv, newpivot)] <- A1[c(newpivot, kadv)] A2[c(kadv, newpivot)] <- A2[c(newpivot, kadv)] A3[c(kadv, newpivot)] <- A3[c(newpivot, kadv)] G[c(kadv, newpivot),1:kadv-1] <- G[c(newpivot, kadv),1:kadv-1] Q[c(kadv, newpivot),1:kadv-1] <- Q[ c(newpivot, kadv),1:kadv-1] ## compute new Cholesky column G[kadv,kadv] <- Dadv[kadv] G[kadv,kadv] <- sqrt(G[kadv,kadv]) newKcol <- kernelMatrix(kernel,x[P[(kadv+1):n],,drop=FALSE],x[P[kadv],,drop=FALSE]) G[(kadv+1):n,kadv] <- 1/G[kadv,kadv]*( newKcol - G[(kadv+1):n,1:kadv-1,drop=FALSE]%*%t(G[kadv,1:kadv-1,drop=FALSE])) ## update diagonal Dadv[(kadv+1):n] <- Dadv[(kadv+1):n] - G[(kadv+1):n,kadv]^2 Dadv[kadv] <- 0 ## performs QR if (centering) Gcol <- G[,kadv,drop=FALSE] - 1/n * matrix(sum(G[,kadv]),n,1 ) else Gcol <- G[,kadv,drop=FALSE] R[1:kadv-1,kadv] <- crossprod(Q[,1:kadv-1], Gcol) Q[,kadv] <- Gcol - Q[,1:kadv-1, drop=FALSE] %*% R[1:kadv-1,kadv, drop=FALSE] R[kadv,kadv] <- sum(abs(Q[,kadv])^2)^(1/2) Q[,kadv] <- Q[,kadv] / drop(R[kadv,kadv]) ## update the cached quantities if (centering) GTG[k:kadv,kadv] <- crossprod(G[,k:kadv], G[,kadv]) else GTG[k:kadv,kadv] <- crossprod(R[1:kadv,k:kadv], R[1:kadv,kadv]) GTG[kadv,k:kadv] <- t(GTG[k:kadv,kadv]) QTy[kadv,] <- crossprod(Q[,kadv], y[P,,drop =FALSE]) 
QTyyTQ[kadv,k:kadv] <- QTy[kadv,,drop = FALSE] %*% t(QTy[k:kadv,,drop = FALSE]) QTyyTQ[k:kadv,kadv] <- t(QTyyTQ[kadv,k:kadv]) ## update costs A1[kadv:n] <- A1[kadv:n] + GTG[kadv,kadv] * G[kadv:n,kadv]^2 A1[kadv:n] <- A1[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1,drop = FALSE] %*% GTG[k:kadv-1,kadv,drop=FALSE]) A3[kadv:n] <- A3[kadv:n] + G[kadv:n,kadv]^2 * sum(R[k:kadv,kadv]^2) temp <- crossprod(R[k:kadv,kadv,drop = FALSE], R[k:kadv,k:kadv-1,drop = FALSE]) A3[kadv:n] <- A3[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1] %*% t(temp)) temp <- crossprod(R[k:kadv,kadv,drop = FALSE], QTyyTQ[k:kadv,k:kadv,drop = FALSE]) temp1 <- temp %*% R[k:kadv,kadv,drop = FALSE] A2[kadv:n] <- A2[kadv:n] + G[kadv:n,kadv,drop = FALSE]^2 %*% temp1 temp2 <- temp %*% R[k:kadv,k:kadv-1] A2[kadv:n] <- A2[kadv:n] + 2 * G[kadv:n,kadv] * (G[kadv:n,k:kadv-1,drop=FALSE] %*% t(temp2)) } ## permute pivots in the Cholesky and QR decomposition between p,q p <- k q <- jast if (p < q){ ## store some quantities Gbef <- G[,p:q] Gbeftotal <- G[,k:kadv] GTGbef <- GTG[p:q,p:q] QTyyTQbef <- QTyyTQ[p:q,k:kadv] Rbef <- R[p:q,p:q] Rbeftotal <- R[k:kadv,k:kadv] tempG <- diag(1,q-p+1,q-p+1) tempQ <- diag(1,q-p+1,q-p+1) for (s in seq(q-1,p,-1)) { ## permute indices P[c(s, s+1)] <- P[c(s+1, s)] Dadv[c(s, s+1)] <- Dadv[c(s+1, s)] D[c(s, s+1)] <- D[c(s+1, s)] A1[c(s, s+1)] <- A1[c(s+1, s)] A2[c(s, s+1)] <- A2[c(s+1, s)] A3[c(s, s+1)] <- A3[c(s+1, s)] G[c(s, s+1),1:kadv] <- G[c(s+1,s), 1:kadv] Gbef[c(s, s+1),] <- Gbef[c(s+1, s),] Gbeftotal[c(s, s+1),] <- Gbeftotal[c(s+1, s),] Q[c(s, s+1),1:kadv] <- Q[c(s+1, s) ,1:kadv] ## update decompositions res <- .qr2(t(G[s:(s+1),s:(s+1)])) Q1 <- res$Q R1 <- res$R G[,s:(s+1)] <- G[,s:(s+1)] %*% Q1 G[s,(s+1)] <- 0 R[1:kadv,s:(s+1)] <- R[1:kadv,s:(s+1)] %*% Q1 res <- .qr2(R[s:(s+1),s:(s+1)]) Q2 <- res$Q R2 <- res$R R[s:(s+1),1:kadv] <- crossprod(Q2, R[s:(s+1),1:kadv]) Q[,s:(s+1)] <- Q[,s:(s+1)] %*% Q2 R[s+1,s] <- 0 ## update relevant quantities if( k <= (s-1) && s+2 <= 
kadv) nonchanged <- c(k:(s-1), (s+2):kadv) if( k <= (s-1) && s+2 > kadv) nonchanged <- k:(s-1) if( k > (s-1) && s+2 <= kadv) nonchanged <- (s+2):kadv GTG[nonchanged,s:(s+1)] <- GTG[nonchanged,s:(s+1)] %*% Q1 GTG[s:(s+1),nonchanged] <- t(GTG[nonchanged,s:(s+1)]) GTG[s:(s+1),s:(s+1)] <- crossprod(Q1, GTG[s:(s+1),s:(s+1)] %*% Q1) QTy[s:(s+1),] <- crossprod(Q2, QTy[s:(s+1),]) QTyyTQ[nonchanged,s:(s+1)] <- QTyyTQ[nonchanged,s:(s+1)] %*% Q2 QTyyTQ[s:(s+1),nonchanged] <- t(QTyyTQ[nonchanged,s:(s+1)]) QTyyTQ[s:(s+1),s:(s+1)] <- crossprod(Q2, QTyyTQ[s:(s+1),s:(s+1)] %*% Q2) tempG[,(s-p+1):(s-p+2)] <- tempG[,(s-p+1):(s-p+2)] %*% Q1 tempQ[,(s-p+1):(s-p+2)] <- tempQ[,(s-p+1):(s-p+2)] %*% Q2 } ## update costs tempG <- tempG[,1] tempGG <- GTGbef %*% tempG A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (Gbef[k:n,] %*% tempGG) # between p and q -> different if(k > (p-1) ) kmin <- 0 else kmin <- k:(p-1) if((q+1) > kadv) qmin <- 0 else qmin <- (q+1):kadv A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,kmin,drop=FALSE] %*% GTG[kmin,k,drop=FALSE]) # below p A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,qmin,drop=FALSE] %*% GTG[qmin,k,drop=FALSE]) # above q tempQ <- tempQ[,1] temp <- G[k:n,qmin,drop=FALSE] %*% t(R[k,qmin,drop=FALSE]) temp <- temp + G[k:n,kmin,drop=FALSE] %*% t(R[k,kmin,drop=FALSE]) temp <- temp + Gbef[k:n,] %*% crossprod(Rbef, tempQ) A3[k:n] <- A3[k:n] - temp^2 A2[k:n] <- A2[k:n] + temp^2 * QTyyTQ[k,k] temp2 <- crossprod(tempQ,QTyyTQbef) %*% Rbeftotal A2[k:n] <- A2[k:n] - 2 * temp * (Gbeftotal[k:n,,drop=FALSE] %*% t(temp2)) } else { ## update costs A1[k:n] <- A1[k:n] - 2 * G[k:n,k] * (G[k:n,k:kadv,drop=FALSE] %*% GTG[k:kadv,k,drop=FALSE]) A3[k:n]<- A3[k:n] - (G[k:n,k:kadv,drop=FALSE] %*% t(R[k,k:kadv,drop=FALSE]))^2 temp <- G[k:n,k:kadv,drop=FALSE] %*% t(R[k,k:kadv,drop=FALSE]) A2[k:n] <- A2[k:n] + (temp^2) * QTyyTQ[k,k] temp2 <- QTyyTQ[k,k:kadv,drop=FALSE] %*% R[k:kadv,k:kadv,drop=FALSE] A2[k:n] <- A2[k:n] - 2 * temp * (G[k:n,k:kadv,drop=FALSE] %*% t(temp2)) } ## update diagonal and 
other quantities (A1,B1) D[(k+1):n] <- D[(k+1):n] - G[(k+1):n,k]^2 D[k] <- 0 A1[k:n] <- A1[k:n] + GTG[k,k] * (G[k:n,k]^2) ## compute errors and true gains temp2 <- crossprod(Q[,k], y[P,]) temp2 <- sum(temp2^2) temp1 <- sum(G[,k]^2) truegain[k] <- temp1 * lambda + temp2 * mu error1[k+1] <- error1[k] - temp1 error2[k+1] <- error2[k] - temp2 if (truegain[k] < tol) break } ## reduce dimensions of decomposition G <- G[,1:k,drop=FALSE] Q <- Q[,1:k,drop=FALSE] R <- R[1:k,1:k,drop=FALSE] ## compute and normalize errors error <- lambda * error1 + mu * error2 error1 <- error1 / traceK error2 <- error2 / sumy2 repivot <- sort(P, index.return = TRUE)$ix return(new("csi",.Data=G[repivot, ,drop=FALSE],Q= Q[repivot,,drop = FALSE], R = R, pivots=repivot, diagresidues = error1, maxresiduals = error2, truegain = truegain, predgain = predictedgain)) }) ## I guess we can replace this with qr() .qr2 <- function(M) { ## QR decomposition for 2x2 matrices Q <- matrix(0,2,2) R <- matrix(0,2,2) x <- sqrt(M[1,1]^2 + M[2,1]^2) R[1,1] <- x Q[,1] <- M[,1]/x R[1,2] <- crossprod(Q[,1], M[,2]) Q[,2] <- M[,2] - R[1,2] * Q[,1] R[2,2] <- sum(abs(Q[,2])^2)^(1/2) Q[,2] <- Q[,2] / R[2,2] return(list(Q=Q,R=R)) } kernlab/R/lssvm.R0000644000175100001440000005506412676465003013421 0ustar hornikusers## reduced least squares support vector machines ## author : alexandros setGeneric("lssvm", function(x, ...) standardGeneric("lssvm")) setMethod("lssvm",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... 
<- NULL m$formula <- m$x m$x <- NULL m$scaled <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 ## no intercept x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- lssvm(x, y, scaled = scaled, ...) kcall(ret) <- cl attr(Terms,"intercept") <- 0 ## no intercept terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("lssvm",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- lssvm(x, ...) return(ret) }) setMethod("lssvm",signature(x="matrix"), function (x, y, scaled = TRUE, kernel = "rbfdot", kpar = "automatic", type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, ## prob.model = FALSE, cross = 0, fit = TRUE, ..., subset, na.action = na.omit) { ## subsetting and na-handling for matrices ret <- new("lssvm") if (!missing(subset)) x <- x[subset,] df <- unique(na.action(data.frame(y, x))) y <- df[,1] x <- as.matrix(df[,-1]) n.action(ret) <- na.action if(!is.null(type)) type(ret) <- match.arg(type,c("classification","regression")) if (is.null(type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type ## scaling, subsetting, and NA handling x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. 
Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] } } ncols <- ncol(x) m <- nrows <- nrow(x) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(lssvm(as.kernelMatrix(x), y = y,type = NULL, tau = 0.01, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(type(ret)=="classification") { if (!is.vector(y) && !is.factor (y)) stop("y must be a vector or a factor.") if(is(y,"vector")) { y <- as.matrix(y) if (nrows != nrow(y)) stop("x and y don't match.") } if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (nrows != length(y)) stop("x and y don't match.") } else if (is.numeric(y)) { y <- as.integer(y) lev(ret) <- unique (y) } else stop ("dependent variable has to be of factor or integer type for classification mode.") ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- NULL ## create multidimensional y matrix yind <- 
t(matrix(1:nclass(ret),nclass(ret),m))
      ## one-hot indicator matrix of the class labels (m x nclass)
      ymat <- matrix(0, m, nclass(ret))
      ymat[yind==y] <- 1

      if(reduced == FALSE)
        {
          ## full least-squares problem on the complete kernel matrix:
          ## solve (KP K + m*tau*K) beta = KP ymat, with KP the column-centered K
          K <- kernelMatrix(kernel,x)
          KP <- K - (1/m)*colSums(K)
          beta <- solve((KP%*%K + m * tau * K), KP%*%ymat)
          b <- colMeans(ymat) - colMeans(K%*%beta)
          alphaindex(ret) <- 1:m
        }
      else
        {
          ## reduced problem: low-rank pivoted incomplete Cholesky (csi)
          ## approximation G G' ~ K, solved in the dim(G)[2]-dimensional subspace
          ## NOTE(review): `rep` shadows base::rep below (calls to rep() still
          ## resolve to the function, but the name is unfortunate).
          G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol)
          rep <- sort(pivots(G),index.return=TRUE)$ix
          G <- G[rep,]
          GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1])
          Gtalpha <- (GtP)%*%G
          diag(Gtalpha) <- diag(Gtalpha) + tau
          Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[rep,,drop=FALSE]
          beta <- solve(t(G[1:dim(G)[2],]), Gtalpha)
          b <- colMeans(ymat) - colMeans(G%*%Gtalpha)
          alphaindex(ret) <- rep[1:dim(G)[2]]
        }

      alpha(ret) <- beta
      ## nonzero alpha*y
      coef(ret) <- alpha(ret)
      ## store SV indexes from current problem for later use in predict
      ## save the indexes from all the SV in a vector (use unique?)
      svindex <- alphaindex(ret)
      ## store offsets in a vector
      b(ret) <- b
      ## store the regularization parameter tau in the returned object
      param(ret)$tau <- tau
      ## calculate class prob.
      ## if (prob.model& reduced== TRUE)
      #  warning("Class Probapilities not supported for reduced model.)
## if(prob.model & reduced == FALSE) ## { ## pos <- as.vector(ymat)==1 ## neg <- as.vector(ymat)==-1 ## ones <- rep(1,dim(x)[1]) ## onesneg <- ones[pos] <- 0 ## ones <- rep(1,dim(x)[1]) ## onespos <- ones[neg] <- 0 ##Kpos <- kernelMult(kernel,x,x[pos,],rep(1,sum(pos))) ##Kneg <- kernelMult(kernel,x,x[neg,],rep(1,sum(neg))) ## Kpos <- K[,pos]%*%rep(1,sum(pos)) ## Kneg <- K[,neg]%*%rep(1,sum(neg)) ## classmeans <- c(sum( Kpos * coef(ret)[pos] * as.vector(ymat)[pos]),sum( Kneg * coef(ret)[pos] * as.vector(ymat)[pos])) ## kneg <- K%*%onesneg ## kpos <- K%*%onespos ## M <- (diag(dim(x)[1])- (1/dim(x)[1])*rep(1,dim(x)[1])%*%t(rep(1,dim(x)[1]))) ## kcentered <- M%*%solve(diag(dim(x)[1]) - tau*M%*%K%*%M)%*%M ## prob.model(ret) <- list(Kpos=Kpos, Kneg=Kneg, kcentered=kcentered, classmeans=classmeans) ## } } if(type(ret)=="regression") { if (nrows != nrow(x)) stop("x and y don't match.") ## initialize p <- 0 svindex <- NULL ymat <- y G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict alphaindex(ret) <- pivots(G)[1:dim(G)[2]] ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } kcall(ret) <- match.call() kernelf(ret) <- kernel ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon) xmatrix(ret) <- x[alphaindex(ret),,drop = FALSE] ymatrix(ret) <- y nSV(ret) <- length(svindex) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NA scaling(ret) <- list(scaled = scaled, x.scale = x.scale) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- lssvm(x[cind,],y[cind],type = type(ret),kernel=kernel,kpar = NULL,reduced = reduced, tau=tau, tol=tol, rank = floor(rank/cross), delta = floor(delta/cross), scaled=FALSE, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } cross(ret) <- cerror } return(ret) }) ## kernelMatrix interface setMethod("lssvm",signature(x="kernelMatrix"), function (x, y, type = NULL, tau = 0.01, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ...) 
{ ## subsetting and na-handling for matrices ret <- new("lssvm") if(!is.null(type)) type(ret) <- match.arg(type,c("classification","regression")) if (is.null(type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type ncols <- ncol(x) m <- nrows <- nrow(x) if(type(ret)=="classification") { if (!is.vector(y) && !is.factor (y)) stop("y must be a vector or a factor.") if (is(y,"vector")) { y <- as.matrix(y) if (nrows != nrow(y)) stop("x and y don't match.")} if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (nrows != length(y)) stop("x and y don't match.") } else if (is.numeric(y)) { y <- as.integer(y) lev(ret) <- unique (y) } else stop ("dependent variable has to be of factor or integer type for classification mode.") ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- NULL ## create multidimensional y matrix yind <- t(matrix(1:nclass(ret),nclass(ret),m)) ymat <- matrix(0, m, nclass(ret)) ymat[yind==y] <- 1 KP <- x - (1/m)*colSums(x) beta <- solve((KP%*%x + m * tau * x), KP%*%ymat) b <- colMeans(ymat) - colMeans(x%*%beta) alphaindex(ret) <- 1:m alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict ## save the indexes from all the SV in a vector (use unique?) 
svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } if(type(ret)=="regression") { if (nrows != nrow(x)) stop("x and y don't match.") ## initialize p <- 0 svindex <- NULL ymat <- y G <- csi(x, ymat, rank = rank , delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict alphaindex(ret) <- pivots(G)[1:dim(G)[2]] ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } kcall(ret) <- match.call() ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon) xmatrix(ret) <- x ymatrix(ret) <- y kernelf(ret) <- "Kernel matrix used for training." nSV(ret) <- length(svindex) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NA if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- lssvm(x[cind,cind],y[cind],type = type(ret), tau=tau, rank = floor(rank/cross), delta = floor(delta/cross), cross = 0, fit = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,svindex,drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } cross(ret) <- cerror } return(ret) }) ## list interface setMethod("lssvm",signature(x="list"), function (x, y, scaled = TRUE, kernel = "stringdot", kpar = list(length=4, lambda = 0.5), type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ..., subset) { ## subsetting and na-handling for matrices ret <- new("lssvm") if (!missing(subset)) x <- x[subset] if(!is.null(type)) type(ret) <- match.arg(type,c("classification","regression")) if (is.null(type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- type m <- nrows <- length(x) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","stringdot")) if(is.character(kpar)) if(kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot" || kernel == "rbfdot" || kernel == "laplacedot" ) { stop("List interface supports only the stringdot kernel.") } } if(is(kernel,"kernel")) 
if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(type(ret)=="classification") { if (!is.vector(y) && !is.factor (y)) stop("y must be a vector or a factor.") if (nrows != nrow(x)) stop("x and y don't match.") if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) } else if (is.numeric(y)) { y <- as.integer(y) lev(ret) <- unique (y) } else stop ("dependent variable has to be of factor or integer type for classification mode.") ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- NULL ## create multidimensional y matrix yind <- t(matrix(1:nclass(ret),nclass(ret),m)) ymat <- matrix(0, m, nclass(ret)) ymat[yind==y] <- 1 if(reduced == FALSE) { K <- kernelMatrix(kernel,x) KP <- K - (1/m)*colSums(K) beta <- solve((KP%*%K + m * tau * K), KP%*%ymat) b <- colMeans(ymat) - colMeans(K%*%beta) alphaindex(ret) <- 1:m } else { G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alphaindex(ret) <- pivots(G)[1:dim(G)[2]] } alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict ## save the indexes from all the SV in a vector (use unique?) 
svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } if(type(ret)=="regression") { if (nrows != nrow(x)) stop("x and y don't match.") ## initialize p <- 0 svindex <- NULL ymat <- y G <- csi(x, ymat, rank = rank ,kernel= kernel, delta = delta , tol = tol) GtP <- t(G) - matrix(rowSums(t(G))/dim(G)[1],dim(G)[2],dim(G)[1]) Gtalpha <- (GtP)%*%G diag(Gtalpha) <- diag(Gtalpha) + tau Gtalpha <- solve(Gtalpha) %*% GtP %*% ymat[pivots(G),,drop=FALSE] beta <- solve(t(G[1:dim(G)[2],]), Gtalpha) b <- colMeans(ymat) - colMeans(G%*%Gtalpha) alpha(ret) <- beta ## nonzero alpha*y coef(ret) <- alpha(ret) ## store SV indexes from current problem for later use in predict alphaindex(ret) <- pivots(G)[1:dim(G)[2]] ## save the indexes from all the SV in a vector (use unique?) svindex <- alphaindex(ret) ## store betas in a vector b(ret) <- b ##store C in return object param(ret)$tau <- tau } kcall(ret) <- match.call() kernelf(ret) <- kernel ## param(ret) <- list(C=C, nu = nu, epsilon = epsilon) xmatrix(ret) <- x[alphaindex(ret)] ymatrix(ret) <- y SVindex(ret) <- svindex nSV(ret) <- length(svindex) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, x) else NA if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- lssvm(x[cind,],y[cind],type = type(ret),kernel=kernel,kpar = NULL,reduced = reduced, tau=tau, tol=tol, rank = floor(rank/cross), delta = floor(delta/cross), scaled=FALSE, cross = 0, fit = FALSE ) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } cross(ret) <- cerror } return(ret) }) #**************************************************************# setMethod("predict", signature(object = "lssvm"), function (object, newdata, type = "response", coupler = "minpair") { sc <- 0 type <- match.arg(type,c("response","probabilities","decision")) if (missing(newdata) && type!="response") return(fitted(object)) else if(missing(newdata)) { newdata <- xmatrix(object) sc <- 1 } ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { if(!is.matrix(newdata)) newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = n.action(object)) } else newdata <- if (is.vector(newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") p<-0 if (!is.null(scaling(object)$x.scale) && sc != 1) newdata[,scaling(object)$scaled] <- scale(newdata[,scaling(object)$scaled, drop = FALSE], center = scaling(object)$x.scale$"scaled:center", scale = 
scaling(object)$x.scale$"scaled:scale" ) if(is(newdata,"kernelMatrix")) res <- newdata %*% coef(object) - b(object) else res <- t(t(kernelMult(kernelf(object), newdata,xmatrix(object), alpha(object))) + b(object)) if(type == "response" && type(object)=="classification"){ predres <- max.col(res) return(factor (lev(object)[predres], levels = lev(object))) } if (type == "decision" || type(object)=="regression") return(res) if (type =="probabilities" && type(object)=="classification") { res - prob.model(object)$classmeans return(res) } }) #****************************************************************************************# setMethod("show","lssvm", function(object){ cat("Least Squares Support Vector Machine object of class \"lssvm\"","\n") cat("\n") cat(paste("problem type :",type(object), "\n")) cat(paste(" parameter : tau =",param(object)$tau, "\n")) cat("\n") show(kernelf(object)) cat(paste("\nNumber of data points used for training :", nSV(object),"\n")) if(!is.null(fitted(object))) cat(paste("Training error :", round(error(object),6),"\n")) if(cross(object)!= -1) cat("Cross validation error :",round(cross(object),6),"\n") }) ##.partopro <- function(z,s,m){ ##return(2*pi*(1/sqrt((1/z)+s^2))*exp(-(m^2)/(2*((1/z)+s^2)))) ##} kernlab/R/kcca.R0000644000175100001440000000451012105726255013140 0ustar hornikusers## Simple kernel canonical corelation analysis ## author: alexandros karatzoglou setGeneric("kcca",function(x, y, kernel="rbfdot", kpar=list(sigma = 0.1), gamma=0.1, ncomps = 10, ...) standardGeneric("kcca")) setMethod("kcca", signature(x = "matrix"), function(x,y,kernel="rbfdot",kpar=list(sigma=0.1), gamma=0.1, ncomps =10, ...) 
{ x <- as.matrix(x) y <- as.matrix(y) if(!(nrow(x)==nrow(y))) stop("Number of rows in x, y matrixes is not equal") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") Kx <- kernelMatrix(kernel,x) Ky <- kernelMatrix(kernel,y) n <- dim(Kx)[1] m <- 2 ## Generate LH VK <- matrix(0,n*2,n); VK[0:n,] <- Kx VK[(n+1):(2*n),] <- Ky LH <- tcrossprod(VK, VK) for (i in 1:m) LH[((i-1)*n+1):(i*n),((i-1)*n+1):(i*n)] <- 0 ## Generate RH RH <- matrix(0,n*m,n*m) RH[1:n,1:n] <- (Kx + diag(rep(gamma,n)))%*%Kx + diag(rep(1e-6,n)) RH[(n+1):(2*n),(n+1):(2*n)] <- (Ky + diag(rep(gamma,n)))%*%Ky + diag(rep(1e-6,n)) RH <- (RH+t(RH))/2 ei <- .gevd(LH,RH) ret <- new("kcca") kcor(ret) <- as.double(ei$gvalues[1:ncomps]) xcoef(ret) <- matrix(as.double(ei$gvectors[1:n,1:ncomps]),n) ycoef(ret) <- matrix(as.double(ei$gvectors[(n+1):(2*n),1:ncomps]),n) ## xvar(ret) <- rotated(xpca) %*% cca$xcoef ## yvar(ret) <- rotated(ypca) %*% cca$ycoef return(ret) }) ## gevd compute the generalized eigenvalue ## decomposition for (a,b) .gevd<-function(a,b=diag(nrow(a))) { bs<-.mfunc(b,function(x) .ginvx(sqrt(x))) ev<-eigen(bs%*%a%*%bs) return(list(gvalues=ev$values,gvectors=bs%*%ev$vectors)) } ## mfunc is a helper to compute matrix functions .mfunc<-function(a,fn=sqrt) { e<-eigen(a); y<-e$vectors; v<-e$values return(tcrossprod(y%*%diag(fn(v)),y)) } ## ginvx is a helper to compute reciprocals .ginvx<-function(x) {ifelse(x==0,0,1/x)} kernlab/R/kmmd.R0000644000175100001440000002030012560371302013154 0ustar hornikusers## calculates the kernel maximum mean discrepancy for samples from two distributions ## author: alexandros karatzoglou setGeneric("kmmd",function(x,...) standardGeneric("kmmd")) setMethod("kmmd", signature(x = "matrix"), function(x, y, kernel="rbfdot",kpar="automatic", alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) 
{ x <- as.matrix(x) y <- as.matrix(y) res <- new("kmmd") if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(kmmd(x= as.kernelMatrix(x), y = y, Kxy = as.kernelMatrix(x)%*%y, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=sigest(rbind(x,y),scaled=FALSE)[2]) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") m <- dim(x)[1] n <- dim(y)[1] N <- max(m,n) M <- min(m,n) Kxx <- kernelMatrix(kernel,x) Kyy <- kernelMatrix(kernel,y) Kxy <- kernelMatrix(kernel,x,y) resmmd <- .submmd(Kxx, Kyy, Kxy, alpha) H0(res) <- (resmmd$mmd1 > resmmd$D1) Radbound(res) <- resmmd$D1 Asymbound(res) <- 0 mmdstats(res)[1] <- resmmd$mmd1 mmdstats(res)[2] <- resmmd$mmd3 if(asymptotic){ boundA <- .submmd3bound(Kxx, Kyy, Kxy, alpha, frac, ntimes, replace) AsympH0(res) <- (resmmd$mmd3 > boundA) Asymbound(res) <- boundA } kernelf(res) <- kernel return(res) }) setMethod("kmmd",signature(x="list"), function(x, y, kernel="stringdot",kpar=list(type="spectrum",length=4), alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) 
{ if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") Kxx <- kernelMatrix(kernel,x) Kyy <- kernelMatrix(kernel,y) Kxy <- kernelMatrix(kernel,x,y) ret <- kmmd(x=Kxx,y = Kyy,Kxy=Kxy, alpha=alpha, asymptotic= asymptotic, replace = replace, ntimes = ntimes, frac= frac) kernelf(ret) <- kernel return(ret) }) setMethod("kmmd",signature(x="kernelMatrix"), function (x, y, Kxy, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...) { res <- new("kmmd") resmmd <- .submmd(x, y, Kxy, alpha) H0(res) <- (resmmd$mmd1 > resmmd$D1) Radbound(res) <- resmmd$D1 Asymbound(res) <- 0 mmdstats(res)[1] <- resmmd$mmd1 mmdstats(res)[2] <- resmmd$mmd3 if(asymptotic){ boundA <- .submmd3bound(x, y, Kxy, alpha, frac, ntimes, replace) AsympH0(res) <- (resmmd$mmd1 > boundA) Asymbound(res) <- boundA } kernelf(res) <- " Kernel matrix used as input." return(res) }) .submmd <- function(Kxx,Kyy, Kxy, alpha) { m <- dim(Kxx)[1] n <- dim(Kyy)[1] N <- max(m,n) M <- min(m,n) sumKxx <- sum(Kxx) if(m!=n) sumKxxM <- sum(Kxx[1:M,1:M]) else sumKxxM <- sumKxx dgxx <- diag(Kxx) sumKxxnd <- sumKxx - sum(dgxx) R <- max(dgxx) RM <- max(dgxx[1:M]) hu <- colSums(Kxx[1:M,1:M]) - dgxx[1:M] sumKyy <- sum(Kyy) if(m!=n) sumKyyM <- sum(Kyy[1:M,1:M]) else sumKyyM <- sumKyy dgyy <- diag(Kyy) sumKyynd <- sum(Kyy) - sum(dgyy) R <- max(R,dgyy) RM <- max(RM,dgyy[1:M]) # RM instead of R in original hu <- hu + colSums(Kyy[1:M,1:M]) - dgyy[1:M] sumKxy <- sum(Kxy) if (m!=n) sumKxyM <- sum(Kxy[1:M,1:M]) else sumKxyM <- sumKxy dg <- diag(Kxy) # up to M only hu <- hu - colSums(Kxy[1:M,1:M]) - colSums(t(Kxy[1:M,1:M])) + 2*dg # one sided sum mmd1 <- sqrt(max(0,sumKxx/(m*m) + sumKyy/(n*n) - 2/m/n* sumKxy)) mmd3 <- sum(hu)/M/(M-1) D1 <- 2*sqrt(RM/M)+sqrt(log(1/alpha)*4*RM/M) return(list(mmd1=mmd1,mmd3=mmd3,D1=D1)) } .submmd3bound <- function(Kxx,Kyy, Kxy, alpha, frac, ntimes, 
replace) { ## implements the bootstrapping approach to the MMD3 bound by shuffling ## the kernel matrix ## frac : fraction of data used for bootstrap ## ntimes : how many times MMD is to be evaluated m <- dim(Kxx)[1] n <- dim(Kyy)[1] M <- min(m,n) N <- max(m,n) poslabels <- 1:m neglabels <- (m+1):(m+n) ## bootstrap bootmmd3 <- rep(0,ntimes) for (i in 1:ntimes) { nsamples <- ceiling(frac*min(m,n)) xinds <- sample(1:m,nsamples,replace=replace) yinds <- sample(1:n,nsamples,replace=replace) newlab <- c(poslabels[xinds],neglabels[yinds]) samplenew <- sample(newlab, length(newlab), replace=FALSE) xinds <- samplenew[1:nsamples] yinds <- samplenew[(nsamples+1):length(samplenew)] newm <- length(xinds) newn <- length(yinds) newM <- min(newm,newn) ##get new kernel matrices (without concat to big matrix to save memory) xind1 <- xinds[xinds<=m] xind2 <- xinds[xinds>m]- m yind1 <- yinds[yinds<=m] yind2 <- yinds[yinds>m]-m ##Kxx (this should be implemented with kernelMult for memory efficiency) nKxx <- rbind(cbind(Kxx[xind1,xind1],Kxy[xind1,xind2]), cbind(t(Kxy[xind1,xind2]),Kyy[xind2,xind2])) dgxx <- diag(nKxx) hu <- colSums(nKxx[1:newM,1:newM]) - dgxx[1:newM] # one sided sum rm(nKxx) #Kyy nKyy <- rbind(cbind(Kxx[yind1,yind1],Kxy[yind1,yind2]), cbind(t(Kxy[yind1,yind2]), Kyy[yind2,yind2])) dgyy <- diag(nKyy) hu <- hu + colSums(nKyy[1:newM,1:newM]) - dgyy[1:newM] rm(nKyy) ## Kxy nKxy <- rbind(cbind(Kxx[yind1,xind1],Kxy[yind1,xind2]), cbind(t(Kxy[xind1,yind2]),Kyy[yind2,xind2])) dg <- diag(nKxy) hu <- hu - colSums(nKxy[1:newM,1:newM]) - colSums(t(nKxy[1:newM,1:newM])) + 2*dg rm(nKxy) ## now calculate mmd3 bootmmd3[i] <- sum(hu)/newM/(newM-1) } bootmmd3 <- sort(bootmmd3, decreasing=TRUE); aind <- floor(alpha*ntimes) ## better less than too much (-> floor); ## take threshold in between aind and the next smaller value: bound <- sum(bootmmd3[c(aind,aind+1)])/2; return(bound) } setMethod("show","kmmd", function(object){ cat("Kernel Maximum Mean Discrepancy object of class 
\"kmmd\"","\n","\n") show(kernelf(object)) if(is.logical(object@H0)){ cat("\n") cat("\n","H0 Hypothesis rejected : ", paste(H0(object))) cat("\n","Rademacher bound : ", paste(Radbound(object))) } cat("\n") if(Asymbound(object)!=0){ cat("\n","H0 Hypothesis rejected (based on Asymptotic bound): ", paste(AsympH0(object))) cat("\n","Asymptotic bound : ", paste(Asymbound(object))) } cat("\n","1st and 3rd order MMD Statistics : ", paste( mmdstats(object))) cat("\n") }) kernlab/R/kqr.R0000644000175100001440000002445512676464751013063 0ustar hornikuserssetGeneric("kqr", function(x, ...) standardGeneric("kqr")) setMethod("kqr",signature(x="formula"), function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){ cl <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$formula <- m$x m$x <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) y <- model.extract(m, "response") if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- kqr(x, y, scaled = scaled, ...) kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("kqr",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- kqr(x, ...) 
ret }) setMethod("kqr",signature(x="matrix"), function (x, y, scaled = TRUE, tau = 0.5, C = 0.1, kernel = "rbfdot", kpar = "automatic", reduced = FALSE, rank = dim(x)[1]/6, fit = TRUE, cross = 0, na.action = na.omit) { if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1") ret <- new("kqr") param(ret) <- list(C = C, tau = tau) if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) tmpsc <- NULL x.scale <- y.scale <- NULL ## scaling if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")] y <- scale(y) y.scale <- attributes(y)[c("scaled:center","scaled:scale")] y <- as.vector(y) tmpsc <- list(scaled = scaled, x.scale = x.scale,y.scale = y.scale) } } ## Arrange all the kernel mambo jumpo if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot")) if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE,frac=1)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { 
if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## Setup QP problem and call ipop if(!reduced) H = kernelMatrix(kernel,x) else H = csi(x, kernel = kernel, rank = rank) c = -y A = rep(1,m) b = 0 r = 0 l = matrix(C * (tau-1),m,1) u = matrix(C * tau ,m,1) qpsol = ipop(c, H, A, b, l, u, r) alpha(ret)= coef(ret) = primal(qpsol) b(ret) = dual(qpsol)[1] ## Compute training error/loss xmatrix(ret) <- x ymatrix(ret) <- y kernelf(ret) <- kernel kpar(ret) <- kpar type(ret) <- ("Quantile Regresion") if (fit){ fitted(ret) <- predict(ret, x) if (!is.null(scaling(ret)$y.scale)) fitted(ret) <- fitted(ret) * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center" error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y,fitted(ret),tau)) } else fitted(ret) <- NULL if(any(scaled)) scaling(ret) <- tmpsc ## Crossvalidation cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { pinloss <- 0 ramloss <- 0 crescs <- NULL suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- kqr(x[cind,],y[cind], tau = tau, C = C, scale = FALSE, kernel = kernel, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) crescs <- c(crescs,cres) } if (!is.null(scaling(ret)$y.scale)){ crescs <- crescs * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center" ysvgr <- y[unlist(vgr)] * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center" } else ysvgr <- y[unlist(vgr)] pinloss <- drop(pinloss(ysvgr, crescs, tau)) ramloss <- drop(ramloss(ysvgr, crescs, tau)) cross(ret) <- c(pinloss, ramloss) } return(ret) }) setMethod("kqr",signature(x="list"), function (x, y, tau = 0.5, C = 0.1, kernel = "strigdot", kpar = list(length=4, C=0.5), fit = TRUE, cross = 0) { if((tau > 1)||(tau < 0 )) stop("tau has to be 
strictly between 0 and 1") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") K <- kernelMatrix(kernel,x) ret <- kqr(K,y = y,tau = tau, C = C, fit = fit, cross = cross) kernelf(ret) <- kernel kpar(ret) <- kpar return(ret) }) setMethod("kqr",signature(x="kernelMatrix"), function (x, y, tau = 0.5, C = 0.1, fit = TRUE, cross = 0) { if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1") ret <- new("kqr") param(ret) <- list(C = C, tau = tau) ncols <- ncol(x) m <- nrows <- nrow(x) y <- as.vector(y) ## Setup QP problem and call ipop H = x c = -y A = rep(1,m) b = 0 r = 0 l = matrix(C * (tau-1),m,1) u = matrix(C * tau ,m,1) qpsol = ipop(c, H, A, b, l, u, r) alpha(ret)= coef(ret) = primal(qpsol) b(ret) = dual(qpsol)[1] ## Compute training error/loss ymatrix(ret) <- y kernelf(ret) <- "Kernel Matrix used." type(ret) <- ("Quantile Regresion") if (fit){ fitted(ret) <- predict(ret, x) error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y,fitted(ret),tau)) } else NA ## Crossvalidation cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { pinloss <- 0 ramloss <- 0 crescs <- NULL suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) cret <- kqr(x[cind,cind],y[cind], tau = tau, C = C, scale = FALSE, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],vgr[[i]]]) crescs <- c(crescs,cres) } ysvgr <- y[unlist(vgr)] pinloss <- drop(pinloss(ysvgr, crescs, tau)) ramloss <- drop(ramloss(ysvgr, crescs, tau)) cross(ret) <- c(pinloss, ramloss) } return(ret) }) pinloss <- function(y,f,tau) { if(is.vector(y)) m <- length(y) else m <- dim(y)[1] tmp <- y - f return((tau *sum(tmp*(tmp>=0)) + (tau-1) * sum(tmp * (tmp<0)))/m) } ramploss <- function(y,f,tau) { 
if(is.vector(y)) m <- length(y) else m <- dim(y)[1] return(sum(y<=f)/m) } setMethod("predict", signature(object = "kqr"), function (object, newdata) { sc <- 0 if (missing(newdata)) if(!is.null(fitted(object))) return(fitted(object)) else stop("newdata is missing and no fitted values found.") if(!is(newdata,"kernelMatrix")){ ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action) } else newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") if (is.list(scaling(object)) && sc != 1) newdata[,scaling(object)$scaled] <- scale(newdata[,scaling(object)$scaled, drop = FALSE], center = scaling(object)$x.scale$"scaled:center", scale = scaling(object)$x.scale$"scaled:scale" ) predres <- kernelMult(kernelf(object),newdata,xmatrix(object),as.matrix(alpha(object))) - b(object) if (!is.null(scaling(object)$y.scale)) return(predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center") else return(predres) } else { return(newdata%*%alpha(object) - b(object)) } }) setMethod("show","kqr", function(object){ cat("Kernel Quantile Regression object of class \"kqr\"","\n") cat("\n") show(kernelf(object)) cat("\n") cat("Regularization Cost Parameter C: ",round(param(object)[[1]],9)) cat(paste("\nNumber of training instances learned :", dim(xmatrix(object))[1],"\n")) if(!is.null(fitted(object))) cat(paste("Train error :"," pinball loss : ", round(error(object)[1],9)," rambloss :", round(error(object)[2],9),"\n")) ##train error & loss if(cross(object)!=-1) cat("Cross validation error :", " pinballoss : ", round(cross(object)[1],9)," rambloss :", round(cross(object)[2],9),"\n") }) kernlab/R/onlearn.R0000644000175100001440000001667712560371302013710 
0ustar hornikusers## kernel based on-line learning algorithms for classification, novelty detection and regression. ## ## created 15.09.04 alexandros ## updated setGeneric("onlearn",function(obj, x, y = NULL, nu = 0.2, lambda = 1e-4) standardGeneric("onlearn")) setMethod("onlearn", signature(obj = "onlearn"), function(obj , x, y = NULL, nu = 0.2, lambda = 1e-4) { if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) buffernotfull <- TRUE else buffernotfull <- FALSE if(is.vector(x)) x <- matrix(x,,length(x)) d <- dim(x)[2] for (i in 1:dim(x)[1]) { xt <- x[i,,drop=FALSE] yt <- y[i] if(type(obj)=="novelty") { phi <- fit(obj) if(phi < 0) { alpha(obj) <- (1-lambda) * alpha(obj) if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%%buffer(obj) +1 } alpha(obj)[onstop(obj)] <- lambda xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(nu-1) } else rho(obj) <- rho(obj) + lambda*nu rho(obj) <- max(rho(obj), 0) if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) fit(obj) <- drop(kernelMult(kernelf(obj), xt, matrix(xmatrix(obj)[1:onstop(obj),],ncol=d), matrix(alpha(obj)[1:onstop(obj)],ncol=1)) - rho(obj)) else fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) - rho(obj)) } if(type(obj)=="classification") { if(is.null(pattern(obj)) && is.factor(y)) pattern(obj) <- yt if(!is.null(pattern(obj))) if(pattern(obj) == yt) yt <- 1 else yt <- -1 phi <- fit(obj) alpha(obj) <- (1-lambda) * alpha(obj) if(yt*phi < rho(obj)) { if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%%buffer(obj) +1 } alpha(obj)[onstop(obj)] <- lambda*yt b(obj) <- b(obj) + lambda*yt xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(nu-1) ## (1-nu) ?? 
} else rho(obj) <- rho(obj) + lambda*nu rho(obj) <- max(rho(obj), 0) if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj)[1:onstop(obj),,drop=FALSE], matrix(alpha(obj)[1:onstop(obj)],ncol=1)) + b(obj)) else fit(obj) <-drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) + b(obj)) } if(type(obj)=="regression") { alpha(obj) <- (1-lambda) * alpha(obj) phi <- fit(obj) if(abs(-phi) < rho(obj)) { if(buffernotfull) onstop(obj) <- onstop(obj) + 1 else{ onstop(obj) <- onstop(obj)%%buffer(obj) + 1 onstart(obj) <- onstart(obj)%% buffer(obj) +1 } alpha(obj)[onstop(obj)] <- sign(yt-phi)*lambda xmatrix(obj)[onstop(obj),] <- xt rho(obj) <- rho(obj) + lambda*(1-nu) ## (1-nu) ?? } else{ rho(obj) <- rho(obj) - lambda*nu alpha(obj)[onstop(obj)] <- sign(yt-phi)/rho(obj) } if(onstart(obj) == 1 && onstop(obj) < buffer(obj)) fit(obj) <- drop(kernelMult(kernelf(obj), xt, matrix(xmatrix(obj)[1:onstop(obj),],ncol=d), matrix(alpha(obj)[1:onstop(obj)],ncol=1)) + b(obj)) else fit(obj) <- drop(kernelMult(kernelf(obj), xt, xmatrix(obj), matrix(alpha(obj),ncol=1)) + b(obj)) } } return(obj) }) setGeneric("inlearn",function(d, kernel = "rbfdot", kpar = list(sigma=0.1), type = "novelty", buffersize = 1000) standardGeneric("inlearn")) setMethod("inlearn", signature(d = "numeric"), function(d ,kernel = "rbfdot", kpar = list(sigma=0.1), type = "novelty", buffersize = 1000) { obj <- new("onlearn") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") type(obj) <- match.arg(type,c("novelty","classification","regression")) xmatrix(obj) <- matrix(0,buffersize,d) kernelf(obj) <- kernel onstart(obj) <- 1 onstop(obj) <- 1 fit(obj) <- 0 b(obj) <- 0 alpha(obj) <- rep(0, buffersize) rho(obj) <- 0 buffer(obj) <- buffersize return(obj) }) setMethod("show","onlearn", function(object){ 
cat("On-line learning object of class \"onlearn\"","\n") cat("\n") cat(paste("Learning problem :", type(object), "\n")) cat cat(paste("Data dimensions :", dim(xmatrix(object))[2], "\n")) cat(paste("Buffersize :", buffer(object), "\n")) cat("\n") show(kernelf(object)) }) setMethod("predict",signature(object="onlearn"), function(object, x) { if(is.vector(x)) x<- matrix(x,1) d <- dim(xmatrix(object))[2] if(type(object)=="novelty") { if(onstart(object) == 1 && onstop(object) < buffer(object)) res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol= d), matrix(alpha(object)[1:onstop(object)],ncol=1)) - rho(object)) else res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) - rho(object)) } if(type(object)=="classification") { if(onstart(object) == 1 && onstop(object) < buffer(object)) res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol=d), matrix(alpha(object)[1:onstop(object)],ncol=1)) + b(object)) else res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) + b(object)) } if(type(object)=="regression") { if(onstart(object) == 1 && onstop(object) < buffer(object)) res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object)[1:onstop(object),],ncol=d), matrix(alpha(object)[1:onstop(object)],ncol=1)) + b(object)) else res <- drop(kernelMult(kernelf(object), x, matrix(xmatrix(object),ncol=d), matrix(alpha(object),ncol=1)) + b(object)) } return(res) }) kernlab/R/kha.R0000644000175100001440000001042612676464711013016 0ustar hornikusers #Kernel Hebbian Algorithm function setGeneric("kha",function(x, ...) standardGeneric("kha")) setMethod("kha", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) 
{ mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") Terms <- attr(mf, "terms") x <- model.matrix(mt, mf) res <- kha(x, ...) ## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kha") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) setMethod("kha",signature(x="matrix"), function(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 5, eta = 0.005, th = 1e-4, maxiter = 10000, verbose = FALSE, na.action = na.omit, ...) { x <- na.action(x) x <- as.matrix(x) m <- nrow(x) ret <- new("kha") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## Initialize A dual variables A <- matrix(runif(features*m),m,features)*2 - 1 AOld <- A ## compute square norm of data a <- rowSums(x^2) ## initialize the empirical sum kernel map eskm <- rep(0,m) for (i in 1:m) eskm[i] <- sum(kernelFast(kernel,x,x[i,,drop=FALSE], a)) eks <- sum(eskm) counter <- 0 step <- th + 1 Aold <- A while(step > th && counter < maxiter) { y <- rep(0, features) ot <- rep(0,m) ## Hebbian Iteration for (i in 1:m) { ## compute y output etkm <- as.vector(kernelFast(kernel,x,x[i,,drop=FALSE], a)) sum1 <- as.vector(etkm %*% A) sum2 <- as.vector(eskm%*%A)/m asum <- colSums(A) sum3 <- as.vector(eskm[i]*asum)/m sum4 <- as.vector(eks * asum)/m^2 y <- sum1 - sum2 - sum3 + sum4 ## update A yy <- y%*%t(y) yy[upper.tri(yy)] <- 0 tA <- t(A) A <- t(tA - eta * yy%*%tA) A[i,] <- A[i,] + eta * y } if (counter %% 100 == 0 ) { step = mean(abs(Aold - A)) Aold <- A if(verbose) cat("Iteration :", counter, 
"Converged :", step,"\n") } counter <- counter + 1 } ## Normalize in Feature space cA <- t(A) - colSums(A) Fnorm <- rep(0,features) for (j in 1:m) Fnorm <- Fnorm + colSums(t(cA[,j] * cA) * as.vector(kernelFast(kernel,x,x[j,,drop=FALSE],a))) if(any(Fnorm==0)) { warning("Normalization vector contains zeros, replacing them with ones") Fnorm[which(Fnorm==0)] <- 1 } A <- t(t(A)/sqrt(Fnorm)) pcv(ret) <- A eig(ret) <- Fnorm names(eig(ret)) <- paste("Comp.", 1:features, sep = "") eskm(ret) <- eskm kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) ## Project a new matrix into the feature space setMethod("predict",signature(object="kha"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x)) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else as.matrix(x) if (is.vector(x)||is.data.frame(x)) x<-as.matrix(x) if (!is.matrix(x)) stop("x must be a matrix a vector or a data frame") n <- nrow(x) m <- nrow(xmatrix(object)) A <- pcv(object) y <- matrix(0,n,dim(A)[2]) eks <- sum(eskm(object)) a <- rowSums(xmatrix(object)^2) ## Project data sum2 <- as.vector(eskm(object)%*%A)/m asum <- colSums(A) sum4 <- as.vector(eks * asum)/m^2 for (i in 1:n) { ## compute y output etkm <- as.vector(kernelFast(kernelf(object),xmatrix(object),x[i,,drop=FALSE], a)) sum1 <- as.vector(etkm %*% A) sum3 <- sum(etkm)*asum/m y[i,] <- sum1 - sum2 - sum3 + sum4 } return(y) }) kernlab/R/aobjects.R0000644000175100001440000010724112055335057014036 0ustar hornikusers## S4 object definitions and assigment/accessor functions for the slots. 
## ## created 10.09.03 alexandros karatzoglou ## updated 23.08.05 setClass("kernel",representation("function",kpar="list")) setClass("kernelMatrix",representation("matrix"),prototype=structure(.Data=matrix())) setClassUnion("listI", c("list","numeric","vector","integer","matrix")) setClassUnion("output", c("matrix","factor","vector","logical","numeric","list","integer","NULL")) setClassUnion("input", c("matrix","list")) setClassUnion("kfunction", c("function","character")) setClassUnion("mpinput", c("matrix","data.frame","missing")) setClassUnion("lpinput", c("list","missing")) setClassUnion("kpinput", c("kernelMatrix","missing")) setClass("vm", representation(alpha = "listI", ## since setClassUnion is not working type = "character", kernelf = "kfunction", kpar = "list", xmatrix = "input", ymatrix = "output", fitted = "output", lev = "vector", nclass = "numeric", error = "vector", cross = "vector", n.action= "ANY", terms = "ANY", kcall = "call"), contains= "VIRTUAL") #Generic Vector Machine object if(!isGeneric("type")){ if (is.function("type")) fun <- type else fun <- function(object) standardGeneric("type") setGeneric("type", fun) } setMethod("type", "vm", function(object) object@type) setGeneric("type<-", function(x, value) standardGeneric("type<-")) setReplaceMethod("type", "vm", function(x, value) { x@type <- value x }) if(!isGeneric("kernelf")){ if (is.function("kernelf")) fun <- kernelf else fun <- function(object) standardGeneric("kernelf") setGeneric("kernelf", fun) } setMethod("kernelf", "vm", function(object) object@kernelf) setGeneric("kernelf<-", function(x, value) standardGeneric("kernelf<-")) setReplaceMethod("kernelf", "vm", function(x, value) { x@kernelf <- value x }) if(!isGeneric("kpar")){ if (is.function("kpar")) fun <- kpar else fun <- function(object) standardGeneric("kpar") setGeneric("kpar", fun) } setMethod("kpar", "vm", function(object) object@kpar) setGeneric("kpar<-", function(x, value) standardGeneric("kpar<-")) setReplaceMethod("kpar", 
"vm", function(x, value) { x@kpar <- value x }) if(!isGeneric("kcall")){ if (is.function("kcall")) fun <- kcall else fun <- function(object) standardGeneric("kcall") setGeneric("kcall", fun) } setMethod("kcall", "vm", function(object) object@kcall) setGeneric("kcall<-", function(x, value) standardGeneric("kcall<-")) setReplaceMethod("kcall", "vm", function(x, value) { x@kcall <- value x }) setMethod("terms", "vm", function(x, ...) x@terms) setGeneric("terms<-", function(x, value) standardGeneric("terms<-")) setReplaceMethod("terms", "vm", function(x, value) { x@terms <- value x }) if(!isGeneric("xmatrix")){ if (is.function("xmatrix")) fun <- xmatrix else fun <- function(object) standardGeneric("xmatrix") setGeneric("xmatrix", fun) } setMethod("xmatrix", "vm", function(object) object@xmatrix) setGeneric("xmatrix<-", function(x, value) standardGeneric("xmatrix<-")) setReplaceMethod("xmatrix", "vm", function(x, value) { x@xmatrix <- value x }) if(!isGeneric("ymatrix")){ if (is.function("ymatrix")) fun <- ymatrix else fun <- function(object) standardGeneric("ymatrix") setGeneric("ymatrix", fun) } setMethod("ymatrix", "vm", function(object) object@ymatrix) setGeneric("ymatrix<-", function(x, value) standardGeneric("ymatrix<-")) setReplaceMethod("ymatrix", "vm", function(x, value) { x@ymatrix <- value x }) setMethod("fitted", "vm", function(object, ...) 
object@fitted) setGeneric("fitted<-", function(x, value) standardGeneric("fitted<-")) setReplaceMethod("fitted", "vm", function(x, value) { x@fitted <- value x }) if(!isGeneric("lev")){ if (is.function("lev")) fun <- lev else fun <- function(object) standardGeneric("lev") setGeneric("lev", fun) } setMethod("lev", "vm", function(object) object@lev) setGeneric("lev<-", function(x, value) standardGeneric("lev<-")) setReplaceMethod("lev", "vm", function(x, value) { x@lev <- value x }) if(!isGeneric("nclass")){ if (is.function("nclass")) fun <- nclass else fun <- function(object) standardGeneric("nclass") setGeneric("nclass", fun) } setMethod("nclass", "vm", function(object) object@nclass) setGeneric("nclass<-", function(x, value) standardGeneric("nclass<-")) setReplaceMethod("nclass", "vm", function(x, value) { x@nclass <- value x }) if(!isGeneric("alpha")){ if (is.function("alpha")) fun <- alpha else fun <- function(object) standardGeneric("alpha") setGeneric("alpha", fun) } setMethod("alpha", "vm", function(object) object@alpha) setGeneric("alpha<-", function(x, value) standardGeneric("alpha<-")) setReplaceMethod("alpha", "vm", function(x, value) { x@alpha <- value x }) if(!isGeneric("error")){ if (is.function("error")) fun <- error else fun <- function(object) standardGeneric("error") setGeneric("error", fun) } setMethod("error", "vm", function(object) object@error) setGeneric("error<-", function(x, value) standardGeneric("error<-")) setReplaceMethod("error", "vm", function(x, value) { x@error <- value x }) if(!isGeneric("cross")){ if (is.function("cross")) fun <- cross else fun <- function(object) standardGeneric("cross") setGeneric("cross", fun) } setMethod("cross", "vm", function(object) object@cross) setGeneric("cross<-", function(x, value) standardGeneric("cross<-")) setReplaceMethod("cross", "vm", function(x, value) { x@cross <- value x }) if(!isGeneric("n.action")){ if (is.function("n.action")) fun <- n.action else fun <- function(object) 
standardGeneric("n.action") setGeneric("n.action", fun) } setMethod("n.action", "vm", function(object) object@n.action) setGeneric("n.action<-", function(x, value) standardGeneric("n.action<-")) setReplaceMethod("n.action", "vm", function(x, value) { x@n.action <- value x }) setClass("ksvm", representation(param = "list", scaling = "ANY", coef = "ANY", alphaindex = "ANY", b = "numeric", obj = "vector", SVindex = "vector", nSV = "numeric", prior = "list", prob.model = "list" ), contains="vm") if(!isGeneric("param")){ if (is.function("param")) fun <- param else fun <- function(object) standardGeneric("param") setGeneric("param", fun) } setMethod("param", "ksvm", function(object) object@param) setGeneric("param<-", function(x, value) standardGeneric("param<-")) setReplaceMethod("param", "ksvm", function(x, value) { x@param <- value x }) if(!isGeneric("scaling")){ if (is.function("scaling")) fun <- scaling else fun <- function(object) standardGeneric("scaling") setGeneric("scaling", fun) } setMethod("scaling", "ksvm", function(object) object@scaling) setGeneric("scaling<-", function(x, value) standardGeneric("scaling<-")) setReplaceMethod("scaling", "ksvm", function(x, value) { x@scaling<- value x }) if(!isGeneric("obj")){ if (is.function("obj")) fun <- obj else fun <- function(object) standardGeneric("obj") setGeneric("obj", fun) } setMethod("obj", "ksvm", function(object) object@obj) setGeneric("obj<-", function(x, value) standardGeneric("obj<-")) setReplaceMethod("obj", "ksvm", function(x, value) { x@obj<- value x }) setMethod("coef", "ksvm", function(object, ...) 
object@coef) setGeneric("coef<-", function(x, value) standardGeneric("coef<-")) setReplaceMethod("coef", "ksvm", function(x, value) { x@coef <- value x }) if(!isGeneric("alphaindex")){ if (is.function("alphaindex")) fun <- alphaindex else fun <- function(object) standardGeneric("alphaindex") setGeneric("alphaindex", fun) } setMethod("alphaindex", "ksvm", function(object) object@alphaindex) setGeneric("alphaindex<-", function(x, value) standardGeneric("alphaindex<-")) setReplaceMethod("alphaindex", "ksvm", function(x, value) { x@alphaindex <- value x }) if(!isGeneric("b")){ if (is.function("b")) fun <- b else fun <- function(object) standardGeneric("b") setGeneric("b", fun) } setMethod("b", "ksvm", function(object) object@b) setGeneric("b<-", function(x, value) standardGeneric("b<-")) setReplaceMethod("b", "ksvm", function(x, value) { x@b <- value x }) if(!isGeneric("SVindex")){ if (is.function("SVindex")) fun <- SVindex else fun <- function(object) standardGeneric("SVindex") setGeneric("SVindex", fun) } setMethod("SVindex", "ksvm", function(object) object@SVindex) setGeneric("SVindex<-", function(x, value) standardGeneric("SVindex<-")) setReplaceMethod("SVindex", "ksvm", function(x, value) { x@SVindex <- value x }) if(!isGeneric("nSV")){ if (is.function("nSV")) fun <- nSV else fun <- function(object) standardGeneric("nSV") setGeneric("nSV", fun) } setMethod("nSV", "ksvm", function(object) object@nSV) setGeneric("nSV<-", function(x, value) standardGeneric("nSV<-")) setReplaceMethod("nSV", "ksvm", function(x, value) { x@nSV <- value x }) if(!isGeneric("prior")){ if (is.function("prior")) fun <- prior else fun <- function(object) standardGeneric("prior") setGeneric("prior", fun) } setMethod("prior", "ksvm", function(object) object@prior) setGeneric("prior<-", function(x, value) standardGeneric("prior<-")) setReplaceMethod("prior", "ksvm", function(x, value) { x@prior <- value x }) if(!isGeneric("prob.model")){ if (is.function("prob.model")) fun <- prob.model else fun 
<- function(object) standardGeneric("prob.model") setGeneric("prob.model", fun) } setMethod("prob.model", "ksvm", function(object) object@prob.model) setGeneric("prob.model<-", function(x, value) standardGeneric("prob.model<-")) setReplaceMethod("prob.model", "ksvm", function(x, value) { x@prob.model <- value x }) setClass("lssvm", representation(param = "list", scaling = "ANY", coef = "ANY", alphaindex = "ANY", ## prob.model = "list", b = "numeric", nSV = "numeric" ), contains="vm") ##setMethod("prob.model", "lssvm", function(object) object@prob.model) ##setGeneric("prob.model<-", function(x, value) standardGeneric("prob.model<-")) ##setReplaceMethod("prob.model", "lssvm", function(x, value) { ## x@prob.model <- value ## x ##}) setMethod("param", "lssvm", function(object) object@param) setReplaceMethod("param", "lssvm", function(x, value) { x@param <- value x }) setMethod("scaling", "lssvm", function(object) object@scaling) setReplaceMethod("scaling", "lssvm", function(x, value) { x@scaling<- value x }) setMethod("coef", "lssvm", function(object, ...) 
object@coef) setReplaceMethod("coef", "lssvm", function(x, value) { x@coef <- value x }) setMethod("alphaindex", "lssvm", function(object) object@alphaindex) setReplaceMethod("alphaindex", "lssvm", function(x, value) { x@alphaindex <- value x }) setMethod("b", "lssvm", function(object) object@b) setReplaceMethod("b", "lssvm", function(x, value) { x@b <- value x }) setMethod("nSV", "lssvm", function(object) object@nSV) setReplaceMethod("nSV", "lssvm", function(x, value) { x@nSV <- value x }) setClass("kqr", representation(param = "list", scaling = "ANY", coef = "ANY", b = "numeric" ), contains="vm") setMethod("b", "kqr", function(object) object@b) setReplaceMethod("b", "kqr", function(x, value) { x@b <- value x }) setMethod("scaling", "kqr", function(object) object@scaling) setReplaceMethod("scaling", "kqr", function(x, value) { x@scaling <- value x }) setMethod("coef", "kqr", function(object) object@coef) setReplaceMethod("coef", "kqr", function(x, value) { x@coef <- value x }) setMethod("param", "kqr", function(object) object@param) setReplaceMethod("param", "kqr", function(x, value) { x@param <- value x }) ## failed attempt to get rid of all this above ## mkaccesfun <- function(cls) #{ # snames <- slotNames(cls) ## # # for(i in 1:length(snames)) # { resF <- paste("\"",snames[i],"\"",sep="") # if(!isGeneric(snames[i])) # eval(parse(file="",text=paste("setGeneric(",resF,",function(object)","standardGeneric(",resF,")",")",sep=" "))) # setGeneric(snames[i], function(object) standardGeneric(snames[i])) # # setMethod(snames[i], cls, function(object) eval(parse(file="",text=paste("object@",snames[i],sep="")))) # resG <- paste("\"",snames[i],"<-","\"",sep="") #eval(parse(file="",text=paste("setGeneric(",resG,",function(x, value)","standardGeneric(",resG,")",")",sep=" "))) # setReplaceMethod(snames[i], cls, function(x, value) { # eval(parse(file="",text=paste("x@",snames[i],"<-value",sep=""))) # x # }) # } #} setClass("prc", representation(pcv = "matrix", eig = "vector", 
kernelf = "kfunction", kpar = "list", xmatrix = "input", kcall = "ANY", terms = "ANY", n.action = "ANY"),contains="VIRTUAL") #accessor functions if(!isGeneric("pcv")){ if (is.function("pcv")) fun <- pcv else fun <- function(object) standardGeneric("pcv") setGeneric("pcv", fun) } setMethod("pcv", "prc", function(object) object@pcv) setGeneric("pcv<-", function(x, value) standardGeneric("pcv<-")) setReplaceMethod("pcv", "prc", function(x, value) { x@pcv <- value x }) if(!isGeneric("eig")){ if (is.function("eig")) fun <- eig else fun <- function(object) standardGeneric("eig") setGeneric("eig", fun) } setMethod("eig", "prc", function(object) object@eig) setGeneric("eig<-", function(x, value) standardGeneric("eig<-")) setReplaceMethod("eig", "prc", function(x, value) { x@eig <- value x }) setMethod("kernelf","prc", function(object) object@kernelf) setReplaceMethod("kernelf","prc", function(x, value){ x@kernelf <- value x }) setMethod("xmatrix","prc", function(object) object@xmatrix) setReplaceMethod("xmatrix","prc", function(x, value){ x@xmatrix <- value x }) setMethod("kcall","prc", function(object) object@kcall) setReplaceMethod("kcall","prc", function(x, value){ x@kcall <- value x }) setMethod("terms","prc", function(x, ...) 
x@terms) setReplaceMethod("terms","prc", function(x, value){ x@terms <- value x }) setMethod("n.action","prc", function(object) object@n.action) setReplaceMethod("n.action","prc", function(x, value){ x@n.action <- value x }) ##kernel principal components object setClass("kpca", representation(rotated = "matrix"),contains="prc") #accessor functions if(!isGeneric("rotated")){ if (is.function("rotated")) fun <- rotated else fun <- function(object) standardGeneric("rotated") setGeneric("rotated", fun) } setMethod("rotated", "kpca", function(object) object@rotated) setGeneric("rotated<-", function(x, value) standardGeneric("rotated<-")) setReplaceMethod("rotated", "kpca", function(x, value) { x@rotated <- value x }) ## kernel maximum mean discrepancy setClass("kmmd", representation(H0="logical", AsympH0 ="logical", kernelf = "kfunction", Asymbound="numeric", Radbound="numeric", xmatrix="input", mmdstats="vector")) if(!isGeneric("mmdstats")){ if (is.function("mmdstats")) fun <- mmdstats else fun <- function(object) standardGeneric("mmdstats") setGeneric("mmdstats", fun) } setMethod("mmdstats","kmmd", function(object) object@mmdstats) setGeneric("mmdstats<-", function(x, value) standardGeneric("mmdstats<-")) setReplaceMethod("mmdstats","kmmd", function(x, value){ x@mmdstats <- value x }) if(!isGeneric("Radbound")){ if (is.function("Radbound")) fun <- Radbound else fun <- function(object) standardGeneric("Radbound") setGeneric("Radbound", fun) } setMethod("Radbound","kmmd", function(object) object@Radbound) setGeneric("Radbound<-", function(x, value) standardGeneric("Radbound<-")) setReplaceMethod("Radbound","kmmd", function(x, value){ x@Radbound <- value x }) if(!isGeneric("Asymbound")){ if (is.function("Asymbound")) fun <- Asymbound else fun <- function(object) standardGeneric("Asymbound") setGeneric("Asymbound", fun) } setMethod("Asymbound","kmmd", function(object) object@Asymbound) setGeneric("Asymbound<-", function(x, value) standardGeneric("Asymbound<-")) 
setReplaceMethod("Asymbound","kmmd", function(x, value){ x@Asymbound <- value x }) if(!isGeneric("H0")){ if (is.function("H0")) fun <- H0 else fun <- function(object) standardGeneric("H0") setGeneric("H0", fun) } setMethod("H0","kmmd", function(object) object@H0) setGeneric("H0<-", function(x, value) standardGeneric("H0<-")) setReplaceMethod("H0","kmmd", function(x, value){ x@H0 <- value x }) if(!isGeneric("AsympH0")){ if (is.function("AsympH0")) fun <- AsympH0 else fun <- function(object) standardGeneric("AsympH0") setGeneric("AsympH0", fun) } setMethod("AsympH0","kmmd", function(object) object@AsympH0) setGeneric("AsympH0<-", function(x, value) standardGeneric("AsympH0<-")) setReplaceMethod("AsympH0","kmmd", function(x, value){ x@AsympH0 <- value x }) setMethod("kernelf","kmmd", function(object) object@kernelf) setReplaceMethod("kernelf","kmmd", function(x, value){ x@kernelf <- value x }) setClass("ipop", representation(primal = "vector", dual = "numeric", how = "character" )) if(!isGeneric("primal")){ if (is.function("primal")) fun <- primal else fun <- function(object) standardGeneric("primal") setGeneric("primal", fun) } setMethod("primal", "ipop", function(object) object@primal) setGeneric("primal<-", function(x, value) standardGeneric("primal<-")) setReplaceMethod("primal", "ipop", function(x, value) { x@primal <- value x }) if(!isGeneric("dual")){ if (is.function("dual")) fun <- dual else fun <- function(object) standardGeneric("dual") setGeneric("dual", fun) } setMethod("dual", "ipop", function(object) object@dual) setGeneric("dual<-", function(x, value) standardGeneric("dual<-")) setReplaceMethod("dual", "ipop", function(x, value) { x@dual <- value x }) if(!isGeneric("how")){ if (is.function("how")) fun <- how else fun <- function(object) standardGeneric("how") setGeneric("how", fun) } setMethod("how", "ipop", function(object) object@how) setGeneric("how<-", function(x, value) standardGeneric("how<-")) setReplaceMethod("how", "ipop", function(x, value) { 
x@how <- value x }) # Kernel Canonical Correlation Analysis setClass("kcca", representation(kcor = "vector", xcoef = "matrix", ycoef = "matrix" ##xvar = "matrix", ##yvar = "matrix" )) if(!isGeneric("kcor")){ if (is.function("kcor")) fun <- kcor else fun <- function(object) standardGeneric("kcor") setGeneric("kcor", fun) } setMethod("kcor", "kcca", function(object) object@kcor) setGeneric("kcor<-", function(x, value) standardGeneric("kcor<-")) setReplaceMethod("kcor", "kcca", function(x, value) { x@kcor <- value x }) if(!isGeneric("xcoef")){ if (is.function("xcoef")) fun <- xcoef else fun <- function(object) standardGeneric("xcoef") setGeneric("xcoef", fun) } setMethod("xcoef", "kcca", function(object) object@xcoef) setGeneric("xcoef<-", function(x, value) standardGeneric("xcoef<-")) setReplaceMethod("xcoef", "kcca", function(x, value) { x@xcoef <- value x }) if(!isGeneric("ycoef")){ if (is.function("ycoef")) fun <- ycoef else fun <- function(object) standardGeneric("ycoef") setGeneric("ycoef", fun) } setMethod("ycoef", "kcca", function(object) object@ycoef) setGeneric("ycoef<-", function(x, value) standardGeneric("ycoef<-")) setReplaceMethod("ycoef", "kcca", function(x, value) { x@ycoef <- value x }) ##if(!isGeneric("xvar")){ ## if (is.function("xvar")) ## fun <- xvar ## else fun <- function(object) standardGeneric("xvar") ## setGeneric("xvar", fun) ##} ##setMethod("xvar", "kcca", function(object) object@xvar) ##setGeneric("xvar<-", function(x, value) standardGeneric("xvar<-")) ##setReplaceMethod("xvar", "kcca", function(x, value) { ## x@xvar <- value ## x ##}) ##if(!isGeneric("yvar")){ ## if (is.function("yvar")) ## fun <- yvar ## else fun <- function(object) standardGeneric("yvar") ## setGeneric("yvar", fun) ##} ##setMethod("yvar", "kcca", function(object) object@yvar) ##setGeneric("yvar<-", function(x, value) standardGeneric("yvar<-")) ##setReplaceMethod("yvar", "kcca", function(x, value) { ## x@yvar <- value ## x ##}) ## Gaussian Processes object 
setClass("gausspr",representation(tol = "numeric", scaling = "ANY", sol = "matrix", alphaindex="list", nvar = "numeric" ),contains="vm") setMethod("alphaindex","gausspr", function(object) object@alphaindex) setReplaceMethod("alphaindex","gausspr", function(x, value){ x@alphaindex <- value x }) if(!isGeneric("sol")){ if (is.function("sol")) fun <- sol else fun <- function(object) standardGeneric("sol") setGeneric("sol", fun) } setMethod("sol","gausspr", function(object) object@sol) setGeneric("sol<-", function(x, value) standardGeneric("sol<-")) setReplaceMethod("sol","gausspr", function(x, value){ x@sol <- value x }) setMethod("scaling","gausspr", function(object) object@scaling) setReplaceMethod("scaling","gausspr", function(x, value){ x@scaling <- value x }) setMethod("coef", "gausspr", function(object, ...) object@alpha) # Relevance Vector Machine object setClass("rvm", representation(tol = "numeric", nvar = "numeric", mlike = "numeric", RVindex = "vector", coef = "ANY", nRV = "numeric"),contains ="vm") if(!isGeneric("tol")){ if (is.function("tol")) fun <- tol else fun <- function(object) standardGeneric("tol") setGeneric("tol", fun) } setMethod("tol", "rvm", function(object) object@tol) setGeneric("tol<-", function(x, value) standardGeneric("tol<-")) setReplaceMethod("tol", "rvm", function(x, value) { x@tol <- value x }) setMethod("coef", "rvm", function(object, ...) 
object@coef) setReplaceMethod("coef", "rvm", function(x, value) { x@coef <- value x }) if(!isGeneric("RVindex")){ if (is.function("RVindex")) fun <- RVindex else fun <- function(object) standardGeneric("RVindex") setGeneric("RVindex", fun) } setMethod("RVindex", "rvm", function(object) object@RVindex) setGeneric("RVindex<-", function(x, value) standardGeneric("RVindex<-")) setReplaceMethod("RVindex", "rvm", function(x, value) { x@RVindex <- value x }) if(!isGeneric("nvar")){ if (is.function("nvar")) fun <- nvar else fun <- function(object) standardGeneric("nvar") setGeneric("nvar", fun) } setMethod("nvar", "rvm", function(object) object@nvar) setGeneric("nvar<-", function(x, value) standardGeneric("nvar<-")) setReplaceMethod("nvar", "rvm", function(x, value) { x@nvar <- value x }) if(!isGeneric("nRV")){ if (is.function("nRV")) fun <- nRV else fun <- function(object) standardGeneric("nRV") setGeneric("nRV", fun) } setMethod("nRV", "rvm", function(object) object@nRV) setGeneric("nRV<-", function(x, value) standardGeneric("nRV<-")) setReplaceMethod("nRV", "rvm", function(x, value) { x@nRV <- value x }) setMethod("coef", "rvm", function(object, ...) 
object@alpha) if(!isGeneric("mlike")){ if (is.function("mlike")) fun <- mlike else fun <- function(object) standardGeneric("mlike") setGeneric("mlike", fun) } setMethod("mlike", "rvm", function(object) object@mlike) setGeneric("mlike<-", function(x, value) standardGeneric("mlike<-")) setReplaceMethod("mlike", "rvm", function(x, value) { x@mlike <- value x }) setClass("inchol",representation("matrix", pivots="vector", diagresidues="vector", maxresiduals="vector"), prototype=structure(.Data=matrix(), pivots=vector(), diagresidues=vector(), maxresiduals=vector())) if(!isGeneric("pivots")){ if (is.function("pivots")) fun <- pivots else fun <- function(object) standardGeneric("pivots") setGeneric("pivots", fun) } setMethod("pivots", "inchol", function(object) object@pivots) setGeneric("pivots<-", function(x, value) standardGeneric("pivots<-")) setReplaceMethod("pivots", "inchol", function(x, value) { x@pivots <- value x }) if(!isGeneric("diagresidues")){ if (is.function("diagresidues")) fun <- diagresidues else fun <- function(object) standardGeneric("diagresidues") setGeneric("diagresidues", fun) } setMethod("diagresidues", "inchol", function(object) object@diagresidues) setGeneric("diagresidues<-", function(x,value) standardGeneric("diagresidues<-")) setReplaceMethod("diagresidues", "inchol", function(x, value) { x@diagresidues <- value x }) if(!isGeneric("maxresiduals")){ if (is.function("maxresiduals")) fun <- maxresiduals else fun <- function(object) standardGeneric("maxresiduals") setGeneric("maxresiduals", fun) } setMethod("maxresiduals", "inchol", function(object) object@maxresiduals) setGeneric("maxresiduals<-", function(x,value) standardGeneric("maxresiduals<-")) setReplaceMethod("maxresiduals", "inchol", function(x, value) { x@maxresiduals <- value x }) ## csi object setClass("csi",representation(Q = "matrix", R = "matrix", truegain = "vector", predgain = "vector"),contains="inchol") if(!isGeneric("Q")){ if (is.function("Q")) fun <- Q else fun <- 
function(object) standardGeneric("Q") setGeneric("Q", fun) } setMethod("Q", "csi", function(object) object@Q) setGeneric("Q<-", function(x, value) standardGeneric("Q<-")) setReplaceMethod("Q", "csi", function(x, value) { x@Q <- value x }) if(!isGeneric("R")){ if (is.function("R")) fun <- R else fun <- function(object) standardGeneric("R") setGeneric("R", fun) } setMethod("R", "csi", function(object) object@R) setGeneric("R<-", function(x, value) standardGeneric("R<-")) setReplaceMethod("R", "csi", function(x, value) { x@R <- value x }) if(!isGeneric("truegain")){ if (is.function("truegain")) fun <- truegain else fun <- function(object) standardGeneric("truegain") setGeneric("truegain", fun) } setMethod("truegain", "csi", function(object) object@truegain) setGeneric("truegain<-", function(x, value) standardGeneric("truegain<-")) setReplaceMethod("truegain", "csi", function(x, value) { x@truegain <- value x }) if(!isGeneric("predgain")){ if (is.function("predgain")) fun <- predgain else fun <- function(object) standardGeneric("predgain") setGeneric("predgain", fun) } setMethod("predgain", "csi", function(object) object@predgain) setGeneric("predgain<-", function(x, value) standardGeneric("predgain<-")) setReplaceMethod("predgain", "csi", function(x, value) { x@predgain <- value x }) setClass("specc",representation("vector", centers="matrix", size="vector", kernelf="kfunction", withinss = "vector" ),prototype=structure(.Data=vector(), centers = matrix(), size=matrix(), kernelf = ls, withinss=vector())) if(!isGeneric("centers")){ if (is.function("centers")) fun <- centers else fun <- function(object) standardGeneric("centers") setGeneric("centers", fun) } setMethod("centers", "specc", function(object) object@centers) setGeneric("centers<-", function(x,value) standardGeneric("centers<-")) setReplaceMethod("centers", "specc", function(x, value) { x@centers <- value x }) if(!isGeneric("size")){ if (is.function("size")) fun <- size else fun <- function(object) 
standardGeneric("size") setGeneric("size", fun) } setMethod("size", "specc", function(object) object@size) setGeneric("size<-", function(x,value) standardGeneric("size<-")) setReplaceMethod("size", "specc", function(x, value) { x@size <- value x }) if(!isGeneric("withinss")){ if (is.function("withinss")) fun <- withinss else fun <- function(object) standardGeneric("withinss") setGeneric("withinss", fun) } setMethod("withinss", "specc", function(object) object@withinss) setGeneric("withinss<-", function(x,value) standardGeneric("withinss<-")) setReplaceMethod("withinss", "specc", function(x, value) { x@withinss <- value x }) setMethod("kernelf","specc", function(object) object@kernelf) setReplaceMethod("kernelf","specc", function(x, value){ x@kernelf <- value x }) setClass("ranking",representation("matrix", convergence="matrix", edgegraph="matrix"), prototype=structure(.Data=matrix(), convergence=matrix(), edgegraph=matrix())) if(!isGeneric("convergence")){ if (is.function("convergence")) fun <- convergence else fun <- function(object) standardGeneric("convergence") setGeneric("convergence", fun) } setMethod("convergence", "ranking", function(object) object@convergence) setGeneric("convergence<-", function(x,value) standardGeneric("convergence<-")) setReplaceMethod("convergence", "ranking", function(x, value) { x@convergence <- value x }) if(!isGeneric("edgegraph")){ if (is.function("edgegraph")) fun <- edgegraph else fun <- function(object) standardGeneric("edgegraph") setGeneric("edgegraph", fun) } setMethod("edgegraph", "ranking", function(object) object@edgegraph) setGeneric("edgegraph<-", function(x,value) standardGeneric("edgegraph<-")) setReplaceMethod("edgegraph", "ranking", function(x, value) { x@edgegraph <- value x }) ## online learning algorithms class setClass("onlearn", representation( kernelf = "kfunction", buffer = "numeric", kpar = "list", xmatrix = "matrix", fit = "numeric", onstart = "numeric", onstop = "numeric", alpha = "ANY", rho = "numeric", b 
= "numeric", pattern ="ANY", type="character" )) if(!isGeneric("fit")){ if (is.function("fit")) fun <- fit else fun <- function(object) standardGeneric("fit") setGeneric("fit", fun) } setMethod("fit","onlearn", function(object) object@fit) setGeneric("fit<-", function(x, value) standardGeneric("fit<-")) setReplaceMethod("fit","onlearn", function(x, value){ x@fit <- value x }) if(!isGeneric("onstart")){ if (is.function("onstart")) fun <- onstart else fun <- function(object) standardGeneric("onstart") setGeneric("onstart", fun) } setMethod("onstart", "onlearn", function(object) object@onstart) setGeneric("onstart<-", function(x, value) standardGeneric("onstart<-")) setReplaceMethod("onstart", "onlearn", function(x, value) { x@onstart <- value x }) if(!isGeneric("onstop")){ if (is.function("onstop")) fun <- onstop else fun <- function(object) standardGeneric("onstop") setGeneric("onstop", fun) } setMethod("onstop", "onlearn", function(object) object@onstop) setGeneric("onstop<-", function(x, value) standardGeneric("onstop<-")) setReplaceMethod("onstop", "onlearn", function(x, value) { x@onstop <- value x }) if(!isGeneric("buffer")){ if (is.function("buffer")) fun <- buffer else fun <- function(object) standardGeneric("buffer") setGeneric("buffer", fun) } setMethod("buffer", "onlearn", function(object) object@buffer) setGeneric("buffer<-", function(x, value) standardGeneric("buffer<-")) setReplaceMethod("buffer", "onlearn", function(x, value) { x@buffer <- value x }) setMethod("kernelf","onlearn", function(object) object@kernelf) setReplaceMethod("kernelf","onlearn", function(x, value){ x@kernelf <- value x }) setMethod("kpar","onlearn", function(object) object@kpar) setReplaceMethod("kpar","onlearn", function(x, value){ x@kpar <- value x }) setMethod("xmatrix","onlearn", function(object) object@xmatrix) setReplaceMethod("xmatrix","onlearn", function(x, value){ x@xmatrix <- value x }) setMethod("alpha","onlearn", function(object) object@alpha) 
setReplaceMethod("alpha","onlearn", function(x, value){ x@alpha <- value x }) setMethod("b","onlearn", function(object) object@b) setReplaceMethod("b","onlearn", function(x, value){ x@b <- value x }) setMethod("type","onlearn", function(object) object@type) setReplaceMethod("type","onlearn", function(x, value){ x@type <- value x }) if(!isGeneric("rho")){ if (is.function("rho")) fun <- rho else fun <- function(object) standardGeneric("rho") setGeneric("rho", fun) } setMethod("rho", "onlearn", function(object) object@rho) setGeneric("rho<-", function(x, value) standardGeneric("rho<-")) setReplaceMethod("rho", "onlearn", function(x, value) { x@rho <- value x }) if(!isGeneric("pattern")){ if (is.function("pattern")) fun <- pattern else fun <- function(object) standardGeneric("pattern") setGeneric("pattern", fun) } setMethod("pattern", "onlearn", function(object) object@pattern) setGeneric("pattern<-", function(x, value) standardGeneric("pattern<-")) setReplaceMethod("pattern", "onlearn", function(x, value) { x@pattern <- value x }) setClass("kfa",representation(alpha = "matrix", alphaindex = "vector", kernelf = "kfunction", xmatrix = "matrix", kcall = "call", terms = "ANY" )) setMethod("coef", "kfa", function(object, ...) object@alpha) setMethod("kernelf","kfa", function(object) object@kernelf) setReplaceMethod("kernelf","kfa", function(x, value){ x@kernelf <- value x }) setMethod("alphaindex","kfa", function(object) object@alphaindex) setReplaceMethod("alphaindex","kfa", function(x, value){ x@alphaindex <- value x }) setMethod("alpha","kfa", function(object) object@alpha) setReplaceMethod("alpha","kfa", function(x, value){ x@alpha <- value x }) setMethod("xmatrix","kfa", function(object) object@xmatrix) setReplaceMethod("xmatrix","kfa", function(x, value){ x@xmatrix <- value x }) setMethod("kcall","kfa", function(object) object@kcall) setReplaceMethod("kcall","kfa", function(x, value){ x@kcall <- value x }) setMethod("terms","kfa", function(x, ...) 
x@terms) setReplaceMethod("terms","kfa", function(x, value){ x@terms <- value x }) ## kernel hebbian algorithm object setClass("kha", representation(eskm ="vector"),contains="prc") ## accessor functions if(!isGeneric("eskm")){ if (is.function("eskm")) fun <- eskm else fun <- function(object) standardGeneric("eskm") setGeneric("eskm", fun) } setMethod("eskm", "kha", function(object) object@eskm) setGeneric("eskm<-", function(x, value) standardGeneric("eskm<-")) setReplaceMethod("eskm", "kha", function(x, value) { x@eskm <- value x }) kernlab/R/kpca.R0000644000175100001440000001214412676464735013176 0ustar hornikusers## kpca function ## author : alexandros setGeneric("kpca",function(x, ...) standardGeneric("kpca")) setMethod("kpca", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") Terms <- attr(mf, "terms") x <- model.matrix(mt, mf) res <- kpca(x, ...) ## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kpca") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) ## Matrix Interface setMethod("kpca",signature(x="matrix"), function(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 0, th = 1e-4, na.action = na.omit, ...) 
{
  ## Kernel PCA, matrix interface: build the (centered) kernel matrix for the
  ## rows of x, eigendecompose it, and return a populated "kpca" object.
  x <- na.action(x)
  x <- as.matrix(x)
  m <- nrow(x)                       # number of observations
  ret <- new("kpca")
  ## `kernel` may be passed as a generator name or function; instantiate it
  ## with the supplied hyper-parameter list `kpar`.
  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }
  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")
  km <- kernelMatrix(kernel,x)
  ## center kernel matrix (double centering: K - 1K/m - K1/m + 1K1/m^2)
  kc <- t(t(km - colSums(km)/m) - rowSums(km)/m) + sum(km)/m^2
  ## compute eigenvectors
  res <- eigen(kc/m,symmetric=TRUE)
  ## features == 0 means "keep every component with eigenvalue above th";
  ## otherwise keep exactly `features` components, warning if the smallest
  ## retained eigenvalue falls below the threshold.
  if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!"))
  ## principal component vectors, scaled so projections have unit variance
  pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features]))
  eig(ret) <- res$values[1:features]
  names(eig(ret)) <- paste("Comp.", 1:features, sep = "")
  ## training data projected onto the retained components
  rotated(ret) <- kc %*% pcv(ret)
  kcall(ret) <- match.call()
  kernelf(ret) <- kernel
  xmatrix(ret) <- x
  return(ret)
})

## List Interface (e.g. lists of strings scored with a string kernel)
setMethod("kpca",signature(x="list"),
          function(x, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5),
                   features = 0, th = 1e-4, na.action = na.omit, ...)
{ x <- na.action(x) m <- length(x) ret <- new("kpca") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") km <- kernelMatrix(kernel,x) ## center kernel matrix kc <- t(t(km - colSums(km)/m) - rowSums(km)/m) + sum(km)/m^2 ## compute eigenvectors res <- eigen(kc/m,symmetric=TRUE) if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!")) pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features])) eig(ret) <- res$values[1:features] names(eig(ret)) <- paste("Comp.", 1:features, sep = "") rotated(ret) <- kc %*% pcv(ret) kcall(ret) <- match.call() kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) ## Kernel Matrix Interface setMethod("kpca",signature(x= "kernelMatrix"), function(x, features = 0, th = 1e-4, ...) { ret <- new("kpca") m <- dim(x)[1] if(m!= dim(x)[2]) stop("Kernel matrix has to be symetric, and positive semidefinite") ## center kernel matrix kc <- t(t(x - colSums(x)/m) - rowSums(x)/m) + sum(x)/m^2 ## compute eigenvectors res <- eigen(kc/m,symmetric=TRUE) if(features == 0) features <- sum(res$values > th) else if(res$values[features] < th) warning(paste("eigenvalues of the kernel matrix are below threshold!")) pcv(ret) <- t(t(res$vectors[,1:features])/sqrt(res$values[1:features])) eig(ret) <- res$values[1:features] names(eig(ret)) <- paste("Comp.", 1:features, sep = "") rotated(ret) <- kc %*% pcv(ret) kcall(ret) <- match.call() xmatrix(ret) <- x kernelf(ret) <- " Kernel matrix used." 
return(ret) }) ## project a new matrix into the feature space setMethod("predict",signature(object="kpca"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x) || !is(x,"list")) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else if (!is(x,"list")) x <- as.matrix(x) if (is.vector(x) || is.data.frame(x)) x <- as.matrix(x) if (!is.matrix(x) && !is(x,"list")) stop("x must be a matrix a vector, a data frame, or a list") if(is(x,"matrix")) { n <- nrow(x) m <- nrow(xmatrix(object))} else { n <- length(x) m <- length(xmatrix(object)) } if(is.character(kernelf(object))) { knc <- x ka <- xmatrix(object) } else { knc <- kernelMatrix(kernelf(object),x,xmatrix(object)) ka <- kernelMatrix(kernelf(object),xmatrix(object)) } ## center ret <- t(t(knc - rowSums(knc)/m) - rowSums(ka)/m) + sum(ka)/(m*n) return(ret %*% pcv(object)) }) kernlab/R/kernels.R0000644000175100001440000023705312055335057013714 0ustar hornikusers## kernel functions ## Functions for computing a kernel value, matrix, matrix-vector ## product and quadratic form ## ## author : alexandros karatzoglou ## Define the kernel objects, ## functions with an additional slot for the kernel parameter list. ## kernel functions take two vector arguments and return a scalar (dot product) rbfdot<- function(sigma=1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") return(exp(sigma*(2*crossprod(x,y) - crossprod(x) - crossprod(y)))) # sigma/2 or sigma ?? 
} } return(new("rbfkernel",.Data=rval,kpar=list(sigma=sigma))) } setClass("rbfkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) laplacedot<- function(sigma=1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") return(exp(-sigma*sqrt(-(round(2*crossprod(x,y) - crossprod(x) - crossprod(y),9))))) } } return(new("laplacekernel",.Data=rval,kpar=list(sigma=sigma))) } setClass("laplacekernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) besseldot<- function(sigma = 1, order = 1, degree = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") lim <- 1/(gamma(order+1)*2^(order)) bkt <- sigma*sqrt(-(2*crossprod(x,y) - crossprod(x) - crossprod(y))) if(bkt < 10e-5) res <- lim else res <- besselJ(bkt,order)*(bkt^(-order)) return((res/lim)^degree) } } return(new("besselkernel",.Data=rval,kpar=list(sigma=sigma ,order = order ,degree = degree))) } setClass("besselkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) anovadot<- function(sigma = 1, degree = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") res <- sum(exp(- sigma * (x - y)^2)) return((res)^degree) } } 
return(new("anovakernel",.Data=rval,kpar=list(sigma=sigma ,degree = degree))) } setClass("anovakernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) splinedot<- function() { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") minv <- pmin(x,y) res <- 1 + x*y*(1+minv) - ((x+y)/2)*minv^2 + (minv^3)/3 fres <- prod(res) return(fres) } } return(new("splinekernel",.Data=rval,kpar=list())) } setClass("splinekernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) fourierdot <- function(sigma = 1) { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must a vector") if (is(x,"vector") && is.null(y)){ return(1) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") res <- (1 - sigma^2)/2*(1 - 2*sigma*cos(x - y) + sigma^2) fres <- prod(res) return(fres) } } return(new("fourierkernel",.Data=rval,kpar=list())) } setClass("fourierkernel",prototype=structure(.Data=function(){},kpar=list(sigma = 1)),contains=c("kernel")) tanhdot <- function(scale = 1, offset = 1) { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y)){ tanh(scale*crossprod(x)+offset) } if (is(x,"vector") && is(y,"vector")){ if (!length(x)==length(y)) stop("number of dimension must be the same on both data points") tanh(scale*crossprod(x,y)+offset) } } return(new("tanhkernel",.Data=rval,kpar=list(scale=scale,offset=offset))) } setClass("tanhkernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel")) 
setClass("polykernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel"))

## Polynomial kernel generator:
## k(x,y) = (scale * <x,y> + offset)^degree
polydot <- function(degree = 1, scale = 1, offset = 1)
{
  rval <- function(x, y = NULL)
  {
    if(!is(x,"vector")) stop("x must be a vector")
    if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector")
    if (is(x,"vector") && is.null(y)){
      ## BUG FIX: the value was computed but not returned (this branch is not
      ## the closure's last expression), so k(x) silently yielded NULL.
      return((scale*crossprod(x)+offset)^degree)
    }
    if (is(x,"vector") && is(y,"vector")){
      if (!length(x)==length(y))
        stop("number of dimension must be the same on both data points")
      return((scale*crossprod(x,y)+offset)^degree)
    }
  }
  return(new("polykernel",.Data=rval,kpar=list(degree=degree,scale=scale,offset=offset)))
}

setClass("vanillakernel",prototype=structure(.Data=function(){},kpar=list()),contains=c("kernel"))

## Linear (vanilla) kernel: k(x,y) = <x,y>. No hyper-parameters.
vanilladot <- function( )
{
  rval <- function(x, y = NULL)
  {
    if(!is(x,"vector")) stop("x must be a vector")
    if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector")
    if (is(x,"vector") && is.null(y)){
      ## BUG FIX: same fall-through as polydot -- return the value explicitly.
      return(crossprod(x))
    }
    if (is(x,"vector") && is(y,"vector")){
      if (!length(x)==length(y))
        stop("number of dimension must be the same on both data points")
      return(crossprod(x,y))
    }
  }
  return(new("vanillakernel",.Data=rval,kpar=list()))
}

setClass("stringkernel",prototype=structure(.Data=function(){},kpar=list(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE)),contains=c("kernel"))

## String kernel generator. `type` selects the variant; the fast variants
## ("spectrum", "boundrange", "constant", "exponential") are computed in C
## via .Call("stringtv", ...), the slower ones via the subsequence/substring
## C kernels. (Body continues on the next chunk.)
stringdot <- function(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE)
{
  type <- match.arg(type,c("sequence","string","fullstring","exponential","constant","spectrum", "boundrange"))

  ## need to do this to set the length parameters
  if(type == "spectrum" | type == "boundrange")
    lambda <- length

  switch(type,
         "sequence" = {
           rval <- function(x, y = NULL)
           {
             if(!is(x,"vector")) stop("x must be a vector")
             if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector")
             if (is(x,"vector") && is.null(y) && normalized == FALSE)
               return(.Call("subsequencek",as.character(x), as.character(x),
                            as.integer(nchar(x)), as.integer(nchar(x)),
                            as.integer(length), as.double(lambda),PACKAGE =
"kernlab")) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call("subsequencek",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("subsequencek",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")/sqrt(.Call("subsequencek",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")*.Call("subsequencek",as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab"))) } }, "exponential" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(2),as.double(lambda)))} if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(2),as.double(lambda))/sqrt(.Call("stringtv",as.character(x),as.character(x),as.integer(1),as.integer(nchar(x)),as.integer(nchar(x)),as.integer(2),as.double(lambda))*.Call("stringtv",as.character(y),as.character(y),as.integer(1),as.integer(nchar(y)),as.integer(nchar(y)),as.integer(2),as.double(lambda)))) } }, "constant" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- 
paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(1),as.double(lambda)))} if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(1),as.double(lambda))/sqrt(.Call("stringtv",as.character(x),as.character(x),as.integer(1),as.integer(nchar(x)),as.integer(nchar(x)),as.integer(1),as.double(lambda))*.Call("stringtv",as.character(y),as.character(y),as.integer(1),as.integer(nchar(y)),as.integer(nchar(y)),as.integer(1),as.double(lambda)))) } }, "spectrum" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") n <- nchar(x) m <- nchar(y) if(n < length | m < length){ warning("String length smaller than length parameter value") return(0)} if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(n),as.integer(m),as.integer(3),as.double(length)))} if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(n),as.integer(m),as.integer(3),as.double(length))/sqrt(.Call("stringtv",as.character(x),as.character(x),as.integer(1),as.integer(n),as.integer(n),as.integer(3),as.double(lambda))*.Call("stringtv",as.character(y),as.character(y),as.integer(1),as.integer(m),as.integer(m),as.integer(3),as.double(length)))) } }, "boundrange" = { rval <- function(x,y=NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y 
must be a vector") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if (normalized == FALSE){ if(is.null(y)) y <- x return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(4),as.double(lambda)))} if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("stringtv",as.character(x),as.character(y),as.integer(1),as.integer(nchar(x)),as.integer(nchar(y)),as.integer(4),as.double(lambda))/sqrt(.Call("stringtv",as.character(x),as.character(x),as.integer(1),as.integer(nchar(x)),as.integer(nchar(x)),as.integer(4),as.double(lambda))*.Call("stringtv",as.character(y),as.character(y),as.integer(1),as.integer(nchar(y)),as.integer(nchar(y)),as.integer(4),as.double(lambda)))) } }, "string" = { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y) && normalized == FALSE) return(.Call("substringk",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call("substringk",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("substringk",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")/sqrt(.Call("substringk",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")*.Call("substringk",as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), 
as.integer(length), as.double(lambda),PACKAGE = "kernlab"))) } }, "fullstring" = { rval<- function(x, y = NULL) { if(!is(x,"vector")) stop("x must be a vector") if(!is(y,"vector")&&!is.null(y)) stop("y must be a vector") if (is(x,"vector") && is.null(y) && normalized == FALSE) return(.Call("fullsubstringk",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is(y,"vector") && normalized == FALSE) return(.Call("fullsubstringk",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")) if (is(x,"vector") && is.null(y) && normalized == TRUE) return(1) if (is(x,"vector") && is(y,"vector") && normalized == TRUE) return(.Call("fullsubstringk",as.character(x), as.character(y), as.integer(nchar(x)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")/sqrt(.Call("fullsubstringk",as.character(x), as.character(x), as.integer(nchar(x)), as.integer(nchar(x)), as.integer(length), as.double(lambda),PACKAGE = "kernlab")*.Call("fullsubstringk",as.character(y), as.character(y), as.integer(nchar(y)), as.integer(nchar(y)), as.integer(length), as.double(lambda),PACKAGE = "kernlab"))) } }) return(new("stringkernel",.Data=rval,kpar=list(length=length, lambda =lambda, type = type, normalized = normalized))) } ## show method for kernel functions setMethod("show",signature(object="kernel"), function(object) { switch(class(object), "rbfkernel" = cat(paste("Gaussian Radial Basis kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"\n")), "laplacekernel" = cat(paste("Laplace kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"\n")), "besselkernel" = cat(paste("Bessel kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma,"order = ",kpar(object)$order, "degree = ", kpar(object)$degree,"\n")), "anovakernel" = 
cat(paste("Anova RBF kernel function.", "\n","Hyperparameter :" ,"sigma = ", kpar(object)$sigma, "degree = ", kpar(object)$degree,"\n")), "tanhkernel" = cat(paste("Hyperbolic Tangent kernel function.", "\n","Hyperparameters :","scale = ", kpar(object)$scale," offset = ", kpar(object)$offset,"\n")), "polykernel" = cat(paste("Polynomial kernel function.", "\n","Hyperparameters :","degree = ",kpar(object)$degree," scale = ", kpar(object)$scale," offset = ", kpar(object)$offset,"\n")), "vanillakernel" = cat(paste("Linear (vanilla) kernel function.", "\n")), "splinekernel" = cat(paste("Spline kernel function.", "\n")), "stringkernel" = { if(kpar(object)$type =="spectrum" | kpar(object)$type =="boundrange") cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :","sub-sequence/string length = ",kpar(object)$length, "\n")) else if(kpar(object)$type =="exponential" | kpar(object)$type =="constant") cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :"," lambda = ", kpar(object)$lambda, "\n")) else cat(paste("String kernel function.", " Type = ", kpar(object)$type, "\n","Hyperparameters :","sub-sequence/string length = ",kpar(object)$length," lambda = ", kpar(object)$lambda, "\n")) if(kpar(object)$normalized == TRUE) cat(" Normalized","\n") if(kpar(object)$normalized == FALSE) cat(" Not Normalized","\n")} ) }) ## create accesor function as in "S4 Classses in 15 pages more or less", well.. if (!isGeneric("kpar")){ if (is.function(kpar)) fun <- kpar else fun <- function(object) standardGeneric("kpar") setGeneric("kpar",fun) } setMethod("kpar","kernel", function(object) object@kpar) ## Functions that return usefull kernel calculations (kernel matrix etc.) 
## kernelMatrix function takes two or three arguments kernelMatrix <- function(kernel, x, y=NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(x,"matrix")) stop("x must be a matrix") if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") n <- nrow(x) res1 <- matrix(rep(0,n*n), ncol = n) if(is.null(y)){ for(i in 1:n) { for(j in i:n) { res1[i,j] <- kernel(x[i,],x[j,]) } } res1 <- res1 + t(res1) diag(res1) <- diag(res1)/2 } if (is(y,"matrix")){ m<-dim(y)[1] res1 <- matrix(0,dim(x)[1],dim(y)[1]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[i,],y[j,]) } } } return(as.kernelMatrix(res1)) } setGeneric("kernelMatrix",function(kernel, x, y = NULL) standardGeneric("kernelMatrix")) kernelMatrix.rbfkernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- rowSums(x*x)/2 if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) for (i in 1:n) res[i,]<- exp(2*sigma*(res[i,] - dota - rep(dota[i],n))) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m) res[,i]<- exp(2*sigma*(res[,i] - dota - rep(dotb[i],n))) return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="rbfkernel"),kernelMatrix.rbfkernel) kernelMatrix.laplacekernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- rowSums(x*x)/2 if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) for (i in 1:n) res[i,]<- exp(-sigma*sqrt(round(-2*(res[i,] - dota - rep(dota[i],n)),9))) return(as.kernelMatrix(res)) } if (is(x,"matrix") && 
is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m) res[,i]<- exp(-sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9))) return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="laplacekernel"),kernelMatrix.laplacekernel) kernelMatrix.besselkernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma nu = kpar(kernel)$order ni = kpar(kernel)$degree n <- dim(x)[1] lim <- 1/(gamma(nu+1)*2^(nu)) dota <- rowSums(x*x)/2 if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) for (i in 1:n){ xx <- sigma*sqrt(round(-2*(res[i,] - dota - rep(dota[i],n)),9)) res[i,] <- besselJ(xx,nu)*(xx^(-nu)) res[i,which(xx<10e-5)] <- lim } return(as.kernelMatrix((res/lim)^ni)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m){ xx <- sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9)) res[,i] <- besselJ(xx,nu)*(xx^(-nu)) res[which(xx<10e-5),i] <- lim } return(as.kernelMatrix((res/lim)^ni)) } } setMethod("kernelMatrix",signature(kernel="besselkernel"),kernelMatrix.besselkernel) kernelMatrix.anovakernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma degree = kpar(kernel)$degree n <- dim(x)[1] if (is(x,"matrix") && is.null(y)){ a <- matrix(0, dim(x)[2], n) res <- matrix(0, n ,n) for (i in 1:n) { a[rep(TRUE,dim(x)[2]), rep(TRUE,n)] <- x[i,] res[i,]<- colSums(exp( - sigma*(a - t(x))^2))^degree } return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) 
stop("matrixes must have the same number of columns") m <- dim(y)[1] b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) for( i in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,m)] <- x[i,] res[i,]<- colSums(exp( - sigma*(b - t(y))^2))^degree } return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="anovakernel"),kernelMatrix.anovakernel) kernelMatrix.polykernel <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") scale = kpar(kernel)$scale offset = kpar(kernel)$offset degree = kpar(kernel)$degree if (is(x,"matrix") && is.null(y)) { res <- (scale*crossprod(t(x))+offset)^degree return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") res <- (scale*crossprod(t(x),t(y)) + offset)^degree return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="polykernel"),kernelMatrix.polykernel) kernelMatrix.vanilla <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if (is(x,"matrix") && is.null(y)){ res <- crossprod(t(x)) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") res <- crossprod(t(x),t(y)) return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="vanillakernel"),kernelMatrix.vanilla) kernelMatrix.tanhkernel <- function(kernel, x, y = NULL) { if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if (is(x,"matrix") && is.null(y)){ scale = kpar(kernel)$scale offset = kpar(kernel)$offset res <- tanh(scale*crossprod(t(x)) + offset) return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") res <- tanh(scale*crossprod(t(x),t(y)) + offset) return(as.kernelMatrix(res)) } } 
setMethod("kernelMatrix",signature(kernel="tanhkernel"),kernelMatrix.tanhkernel) kernelMatrix.splinekernel <- function(kernel, x, y = NULL) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma degree = kpar(kernel)$degree n <- dim(x)[1] if (is(x,"matrix") && is.null(y)){ a <- matrix(0, dim(x)[2], n) res <- matrix(0, n ,n) x <- t(x) for (i in 1:n) { dr <- x + x[,i] dp <- x * x[,i] dm <- pmin(x,x[,i]) res[i,] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } return(as.kernelMatrix(res)) } if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) x <- t(x) y <- t(y) for( i in 1:n) { dr <- y + x[,i] dp <- y * x[,i] dm <- pmin(y,x[,i]) res[i,] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } return(as.kernelMatrix(res)) } } setMethod("kernelMatrix",signature(kernel="splinekernel"),kernelMatrix.splinekernel) kernelMatrix.stringkernel <- function(kernel, x, y=NULL) { n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) normalized = kpar(kernel)$normalized if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { resdiag <- rep(0,n) if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) ## y is null if(is.null(y)){ if(normalized == TRUE){ ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- rep(1,n) } else{ for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) 
for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag } } if (!is.null(y)){ m <- length(y) res1 <- matrix(0,n,m) resdiag1 <- rep(0,m) if(normalized == TRUE){ for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j]) } } } else{ for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[[i]],y[[j]]) } } } } return(as.kernelMatrix(res1)) } else { switch(kpar(kernel)$type, "exponential" = sktype <- 2, "constant" = sktype <- 1, "spectrum" = sktype <- 3, "boundrange" = sktype <- 4) if(sktype==3 &(any(nchar(x) < kpar(kernel)$length)|any(nchar(x) < kpar(kernel)$length))) stop("spectral kernel does not accept strings shorter than the length parameter") if(is(x,"list")) x <- unlist(x) if(is(y,"list")) y <- unlist(y) x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if(is.null(y)) ret <- matrix(0, length(x),length(x)) else ret <- matrix(0,length(x),length(y)) if(is.null(y)){ for(i in 1:length(x)) ret[i,i:length(x)] <- .Call("stringtv",as.character(x[i]),as.character(x[i:length(x)]),as.integer(length(x) - i + 1),as.integer(nchar(x[i])),as.integer(nchar(x[i:length(x)])),as.integer(sktype),as.double(kpar(kernel)$lambda)) ret <- ret + t(ret) diag(ret) <- diag(ret)/2 } else for(i in 1:length(x)) ret[i,] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda)) if(normalized == TRUE){ if(is.null(y)) ret <- t((1/sqrt(diag(ret)))*t(ret*(1/sqrt(diag(ret))))) else{ norm1 <- rep(0,length(x)) norm2 <- rep(0,length(y)) for( i in 1:length(x)) norm1[i] <- .Call("stringtv",as.character(x[i]),as.character(x[i]),as.integer(1),as.integer(nchar(x[i])),as.integer(nchar(x[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) for( i in 1:length(y)) norm2[i] <- 
.Call("stringtv",as.character(y[i]),as.character(y[i]),as.integer(1),as.integer(nchar(y[i])),as.integer(nchar(y[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) ret <- t((1/sqrt(norm2))*t(ret*(1/sqrt(norm1)))) } } } return(as.kernelMatrix(ret)) } setMethod("kernelMatrix",signature(kernel="stringkernel"),kernelMatrix.stringkernel) ## kernelMult computes kernel matrix - vector product ## function computing * z ( %*% z) kernelMult <- function(kernel, x, y=NULL, z, blocksize = 128) { # if(is.function(kernel)) ker <- deparse(substitute(kernel)) # kernel <- do.call(kernel, kpar) if(!is(x,"matrix")) stop("x must be a matrix") if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must ba a matrix or a vector") n <- nrow(x) if(is.null(y)) { ## check if z,x match z <- as.matrix(z) if(is.null(y)&&!dim(z)[1]==n) stop("z columns/length do not match x columns") res1 <- matrix(rep(0,n*n), ncol = n) for(i in 1:n) { for(j in i:n) { res1[j,i] <- kernel(x[i,],x[j,]) } } res1 <- res1 + t(res1) diag(res1) <- diag(res1)/2 } if (is(y,"matrix")) { m <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1] == m) stop("z has wrong dimension") res1 <- matrix(rep.int(0,m*n),ncol=m) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[i,],y[j,]) } } } return(res1%*%z) } setGeneric("kernelMult", function(kernel, x, y=NULL, z, blocksize = 256) standardGeneric("kernelMult")) kernelMult.character <- function(kernel, x, y=NULL, z, blocksize = 256) { return(x%*%z) } setMethod("kernelMult",signature(kernel="character", x="kernelMatrix"),kernelMult.character) kernelMult.rbfkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) 
lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n))))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n))))%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(sigma*(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2))))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(sigma*(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2))))%*%z } return(res) } setMethod("kernelMult",signature(kernel="rbfkernel"),kernelMult.rbfkernel) kernelMult.laplacekernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- 
## kernelMult for the Laplace and Bessel kernels (blockwise K %*% z).
## Squared distances are recovered as -(2*x.y' - |x|^2 - |y|^2) and rounded
## to 9 decimals before sqrt so tiny negative values from floating-point
## cancellation do not produce NaN. For the Bessel kernel, entries with
## xx ~ 0 are replaced by the analytic limit lim = 1/(gamma(nu+1)*2^nu)
## before the (res1/lim)^ni normalization. The start of
## kernelMult.anovakernel (same blocking scheme, explicit per-row colSums)
## is at the end of this segment.
exp(-sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)),9)))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(-sigma*sqrt(-round(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)),9)))%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- exp(-sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)),9)))%*%z lowerl <- upperl + 1 } } if(lowerl <= n) res[lowerl:n,] <- exp(-sigma*sqrt(-round(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2)),9)))%*%z } return(res) } setMethod("kernelMult",signature(kernel="laplacekernel"),kernelMult.laplacekernel) kernelMult.besselkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma nu <- kpar(kernel)$order ni <- kpar(kernel)$degree n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 lim <- 1/(gamma(nu+1)*2^(nu)) dota <- as.matrix(rowSums(x^2)) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { dotab <- rep(1,blocksize)%*%t(dota) for(i in 1:nblocks) { upperl = upperl + blocksize xx <- sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(x) - dotab - dota[lowerl:upperl]%*%t(rep.int(1,n)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx<10e-5)] <- lim res[lowerl:upperl,] <- ((res1/lim)^ni)%*%z lowerl <- upperl 
+ 1 } } if(lowerl <= n) { xx <- sigma*sqrt(-round(2*x[lowerl:n,]%*%t(x) - rep.int(1,n+1-lowerl)%*%t(dota) - dota[lowerl:n]%*%t(rep.int(1,n)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx<10e-5)] <- lim res[lowerl:n,] <- ((res1/lim)^ni)%*%z } } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) dotb <- as.matrix(rowSums(y*y)) if(nblocks > 0) { dotbb <- rep(1,blocksize)%*%t(dotb) for(i in 1:nblocks) { upperl = upperl + blocksize xx <- sigma*sqrt(-round(2*x[lowerl:upperl,]%*%t(y) - dotbb - dota[lowerl:upperl]%*%t(rep.int(1,n2)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx < 10e-5)] <- lim res[lowerl:upperl,] <- ((res1/lim)^ni)%*%z lowerl <- upperl + 1 } } if(lowerl <= n) { xx <- sigma*sqrt(-round(2*x[lowerl:n,]%*%t(y) - rep.int(1,n+1-lowerl)%*%t(dotb) - dota[lowerl:n]%*%t(rep.int(1,n2)),9)) res1 <- besselJ(xx,nu)*(xx^(-nu)) res1[which(xx < 10e-5)] <- lim res[lowerl:n,] <- ((res1/lim)^ni)%*%z } } return(res) } setMethod("kernelMult",signature(kernel="besselkernel"),kernelMult.besselkernel) kernelMult.anovakernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { a <- matrix(0,m,blocksize) re <- matrix(0, n, blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in 1:n) { a[rep(TRUE,m),rep(TRUE,blocksize)] <- x[j,] re[j,] <- colSums(exp( - sigma*(a - t(x[lowerl:upperl,]))^2))^degree } res[lowerl:upperl,] <- 
## kernelMult for the ANOVA and spline kernels (blockwise K %*% z), plus
## the start of the polynomial method. The ANOVA method broadcasts each row
## x[j,] into matrix `a`/`b` and sums the per-dimension Gaussian terms with
## colSums before raising to `degree`; the spline method builds each kernel
## column from dr = x_i + x_j, dp = x_i * x_j, dm = pmin(x_i, x_j) and a
## column-wise product.
## NOTE(review): in kernelMult.splinekernel the remainder loops iterate
## over lowerl:(n-lowerl+1) rather than lowerl:n, the y-branch keeps using
## n where the number of y rows (n2) looks intended, res[lowerl:upperl] in
## the y-branch is indexed without the row comma, and n/m are read from
## dim(x) before the as.matrix coercion -- confirm against upstream before
## relying on the spline method with blocksize < n. Left unchanged here.
t(re)%*%z lowerl <- upperl + 1 } } if(lowerl <= n){ a <- matrix(0,m,n-lowerl+1) re <- matrix(0,n,n-lowerl+1) for(j in 1:n) { a[rep(TRUE,m),rep(TRUE,n-lowerl+1)] <- x[j,] re[j,] <- colSums(exp( - sigma*(a - t(x[lowerl:n,,drop=FALSE]))^2))^degree } res[lowerl:n,] <- t(re)%*%z } } if(is(y,"matrix")) { n2 <- dim(y)[1] nblocks <- floor(n2/blocksize) z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) { b <- matrix(0, m, blocksize) re <- matrix(0, n, blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,blocksize)] <- x[j,] re[j,]<- colSums(exp( - sigma*(b - t(y[lowerl:upperl,]))^2)^degree) } res[,1] <- res[,1] + re %*%z[lowerl:upperl,] lowerl <- upperl + 1 } } if(lowerl <= n) { b <- matrix(0, dim(x)[2], n2-lowerl+1) re <- matrix(0, n, n2-lowerl+1) for( i in 1:n) { b[rep(TRUE,dim(x)[2]),rep(TRUE,n2-lowerl+1)] <- x[i,] re[i,]<- colSums(exp( - sigma*(b - t(y[lowerl:n2,,drop=FALSE]))^2)^degree) } res[,1] <- res[,1] + re%*%z[lowerl:n2] } } return(res) } setMethod("kernelMult",signature(kernel="anovakernel"),kernelMult.anovakernel) kernelMult.splinekernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") n <- dim(x)[1] m <- dim(x)[2] if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) x <- t(x) if(nblocks > 0) { re <- matrix(0, dim(z)[1], blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for (j in lowerl:upperl) { dr <- x + x[ , j] dp <- x * x[ , j] dm <- pmin(x,x[,j]) re[,j-(i-1)*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, 
prod) } res[lowerl:upperl,] <- crossprod(re,z) lowerl <- upperl + 1 } } if(lowerl <= n){ a <- matrix(0,m,n-lowerl+1) re <- matrix(0,dim(z)[1],n-lowerl+1) for(j in lowerl:(n-lowerl+1)) { dr <- x + x[ , j] dp <- x * x[ , j] dm <- pmin(x,x[,j]) re[,j-nblocks*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:n,] <- crossprod(re,z) } } if(is(y,"matrix")) { n2 <- dim(y)[1] nblocks <- floor(n2/blocksize) z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) x <- t(x) y <- t(y) if(nblocks > 0) { re <- matrix(0, dim(z)[1], blocksize) for(i in 1:nblocks) { upperl = upperl + blocksize for(j in lowerl:upperl) { dr <- y + x[ , j] dp <- y * x[ , j] dm <- pmin(y,x[,j]) re[,j-(i-1)*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:upperl] <- crossprod(re, z) lowerl <- upperl + 1 } } if(lowerl <= n) { b <- matrix(0, dim(x)[2], n-lowerl+1) re <- matrix(0, dim(z)[1], n-lowerl+1) for(j in lowerl:(n-lowerl+1)) { dr <- y + x[, j] dp <- y * x[, j] dm <- pmin(y,x[,j]) re[,j-nblocks*blocksize] <- apply((1 + dp + dp*dm - (dr/2)*dm^2 + (dm^3)/3),2, prod) } res[lowerl:n] <- crossprod(re, z) } } return(res) } setMethod("kernelMult",signature(kernel="splinekernel"),kernelMult.splinekernel) kernelMult.polykernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) degree <- kpar(kernel)$degree scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize 
res[lowerl:upperl,] <- ((scale*x[lowerl:upperl,]%*%t(x) + offset)^degree) %*% z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- ((scale*x[lowerl:n,]%*%t(x) +offset)^degree)%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- ((scale*x[lowerl:upperl,]%*%t(y) + offset)^degree)%*%z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- ((scale*x[lowerl:n,]%*%t(y) + offset)^degree)%*%z } return(res) } setMethod("kernelMult",signature(kernel="polykernel"),kernelMult.polykernel) kernelMult.tanhkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or a vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] m <- dim(x)[2] nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- tanh(scale*x[lowerl:upperl,]%*%t(x) + offset) %*% z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- tanh(scale*x[lowerl:n,]%*%t(x) +offset)%*%z } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- matrix(rep(0,dim(z)[2]*n), ncol = dim(z)[2]) if(nblocks > 0) for(i in 1:nblocks) { upperl = upperl + blocksize res[lowerl:upperl,] <- tanh(scale*x[lowerl:upperl,]%*%t(y) + offset)%*%z lowerl <- upperl + 1 } if(lowerl <= n) res[lowerl:n,] <- tanh(scale*x[lowerl:n,]%*%t(y) + offset)%*%z } return(res) } 
## kernelMult for the linear (vanilla) kernel -- K %*% z computed exactly
## via nested crossprod, never forming K -- and the bulk of
## kernelMult.stringkernel. The string method dispatches on
## kpar(kernel)$type: "sequence"/"string"/"fullstring" evaluate the R-level
## kernel function pairwise (with optional k(x,x)/k(y,y) normalization),
## while the remaining types ("exponential"/"constant"/"spectrum"/
## "boundrange") call the compiled routine "stringtv" blockwise.
## FIX(review): the spectrum ("sktype==3") length guard tested nchar(x)
## twice; the second test now checks nchar(y), matching the intent of
## rejecting short strings on either side. nchar(NULL) is integer(0), so
## any() stays FALSE and the guard is still a no-op when y is NULL.
setMethod("kernelMult",signature(kernel="tanhkernel"),kernelMult.tanhkernel) kernelMult.vanillakernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or vector") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") n <- dim(x)[1] m <- dim(x)[2] if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if (is.null(y)) { z <- as.matrix(z) if(!dim(z)[1]==n) stop("z rows must equal x rows") res <- t(crossprod(crossprod(x,z),t(x))) } if(is(y,"matrix")) { n2 <- dim(y)[1] z <- as.matrix(z) if(!dim(z)[1]==n2) stop("z length must equal y rows") res <- t(crossprod(crossprod(y,z),t(x))) } return(res) } setMethod("kernelMult",signature(kernel="vanillakernel"),kernelMult.vanillakernel) kernelMult.stringkernel <- function(kernel, x, y=NULL, z, blocksize = 256) { if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") normalized = kpar(kernel)$normalized n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) resdiag <- rep(0,n) if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) ## y is null if(is.null(y)){ if(normalized == TRUE){ z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to x length") dz <- dim(z)[2] vres <- matrix(0,n,dz) ## calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- rep(1,n) vres <- res1 %*% z } else{ z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to x length") dz <- dim(z)[2] vres <- matrix(0,n,dz) ## 
calculate diagonal elements first, and use them to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- kernel(x[[i]],x[[j]]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag vres <- res1 %*% z } } if (!is.null(y)){ if(normalized == TRUE){ nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") resdiag1 <- rep(0,m) dz <- dim(z)[2] vres <- matrix(0,n,dz) for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { for(j in 1:m) { res1[i - (k-1)*blocksize,j] <- kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j]) } } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,m) for(i in lowerl:n) { for(j in 1:m) { res1[i - nblocks*blocksize,j] <- kernel(x[[i]],y[[j]])/sqrt(resdiag[i]*resdiag1[j]) } } vres[lowerl:n,] <- res1 %*% z } } else { nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") dz <- dim(z)[2] vres <- matrix(0,n,dz) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { for(j in 1:m) { res1[i - (k-1)*blocksize, j] <- kernel(x[[i]],y[[j]]) } } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,m) for(i in lowerl:n) { for(j in 1:m) { res1[i - nblocks*blocksize,j] <- kernel(x[[i]],y[[j]]) } } vres[lowerl:n,] <- res1 %*% z } } } } else { switch(kpar(kernel)$type, "exponential" = sktype <- 2, "constant" = sktype <- 1, "spectrum" = sktype <- 3, "boundrange" = sktype <- 4) if(sktype==3 &(any(nchar(x) < kpar(kernel)$length)|any(nchar(y) < kpar(kernel)$length))) stop("spectral kernel does not 
accept strings shorter than the length parameter") x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") ## y is null if(is.null(y)){ if(normalized == TRUE){ nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to y length") dz <- dim(z)[2] vres <- matrix(0,n,dz) for (i in 1:n) resdiag[i] <- .Call("stringtv",as.character(x[i]),as.character(x[i]),as.integer(1),as.integer(nchar(x[i])),as.integer(nchar(x[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) if (nblocks > 0){ res1 <- matrix(0,blocksize,n) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { res1[i - (k-1)*blocksize, ] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda))/sqrt(resdiag[i]*resdiag) } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,n) for(i in lowerl:n) { res1[i - nblocks*blocksize,] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda))/sqrt(resdiag[i]*resdiag) } vres[lowerl:n,] <- res1 %*% z } } else { nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 z <- as.matrix(z) if(dim(z)[1]!= n) stop("z rows must be equal to y length") dz <- dim(z)[2] vres <- matrix(0,n,dz) if (nblocks > 0){ res1 <- matrix(0,blocksize,n) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { res1[i - (k-1)*blocksize, ] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda)) } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,n) for(i in lowerl:n) { res1[i - nblocks*blocksize,] <- 
## Tail of kernelMult.stringkernel: the blockwise .Call("stringtv") paths
## for the x/y case (normalized by sqrt(resdiag[i]*resdiag1[j]) and
## unnormalized), followed by its setMethod registration. Then kernelPol:
## the default method computes the weighted quadratic form
## res1[i,j] = z_i * k(x_i, y_j) * k_j elementwise (z on both sides when y
## is NULL, exploiting symmetry via res1 + t(res1)), and
## kernelPol.rbfkernel begins the vectorized RBF specialization.
.Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda)) } vres[lowerl:n,] <- res1 %*% z } } } if (!is.null(y)){ if(normalized == TRUE){ nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") resdiag1 <- rep(0,m) dz <- dim(z)[2] vres <- matrix(0,n,dz) for(i in 1:n) resdiag[i] <- .Call("stringtv",as.character(x[i]),as.character(x[i]),as.integer(1),as.integer(nchar(x[i])),as.integer(nchar(x[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) for(i in 1:m) resdiag1[i] <- .Call("stringtv",as.character(y[i]),as.character(y[i]),as.integer(1),as.integer(nchar(y[i])),as.integer(nchar(y[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { res1[i - (k-1)*blocksize, ] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda))/sqrt(resdiag[i]*resdiag1) } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,m) for(i in lowerl:n) { res1[i - nblocks*blocksize,] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda))/sqrt(resdiag[i]*resdiag1) } vres[lowerl:n,] <- res1 %*% z } } else { nblocks <- floor(n/blocksize) lowerl <- 1 upperl <- 0 m <- length(y) z <- as.matrix(z) if(dim(z)[1]!= m) stop("z rows must be equal to y length") dz <- dim(z)[2] vres <- matrix(0,n,dz) if (nblocks > 0){ res1 <- matrix(0,blocksize,m) for(k in 1:nblocks){ upperl <- upperl + blocksize for(i in lowerl:(upperl)) { res1[i - (k-1)*blocksize, ] <- 
.Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda)) } vres[lowerl:upperl,] <- res1 %*% z lowerl <- upperl +1 } } if(lowerl <= n) { res1 <- matrix(0,n-lowerl+1,m) for(i in lowerl:n) { res1[i - nblocks*blocksize,] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda)) } vres[lowerl:n,] <- res1 %*% z } } } } return(vres) } setMethod("kernelMult",signature(kernel="stringkernel"),kernelMult.stringkernel) ## kernelPol return the quadratic form of a kernel matrix ## kernelPol returns the scalar product of x y componentwise with polarities ## of z and k kernelPol <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(x,"matrix")) stop("x must be a matrix") if(!is(y,"matrix")&&!is.null(y)) stop("y must be a matrix") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must ba a matrix or a vector") n <- nrow(x) z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") res1 <- matrix(rep(0,n*n), ncol = n) if (is.null(y)) { for(i in 1:n) { for(j in i:n) { res1[i,j] <- kernel(x[i,],x[j,])*z[j]*z[i] } } res1 <- res1 + t(res1) diag(res1) <- diag(res1)/2 } if (is(x,"matrix") && is(y,"matrix")){ m <- dim(y)[1] if(is.null(k)) stop("k not specified!") k <- as.matrix(k) if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") if(!dim(z)[2]==dim(k)[2]) stop("z and k vectors must have the same number of columns") if(!dim(x)[1]==dim(z)[1]) stop("z and x must have the same number of rows") if(!dim(y)[1]==dim(k)[1]) stop("y and k must have the same number of rows") res1 <- matrix(0,dim(x)[1],dim(y)[1]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- kernel(x[i,],y[j,])*z[i]*k[j] } } } return(res1) } setGeneric("kernelPol", function(kernel, x, y=NULL, z, k = NULL) standardGeneric("kernelPol")) kernelPol.rbfkernel <- 
function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix a vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma n <- dim(x)[1] dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) res[i,] <- z[i,]*(exp(2*sigma*(res[i,] - dota - rep(dota[i],n)))*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m)#2*sigma or sigma res[,i]<- k[i,]*(exp(2*sigma*(res[,i] - dota - rep(dotb[i],n)))*z) return(res) } } setMethod("kernelPol",signature(kernel="rbfkernel"),kernelPol.rbfkernel) kernelPol.laplacekernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) n <- dim(x)[1] dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) res[i,] <- z[i,]*(exp(-sigma*sqrt(-round(2*(res[i,] - dota - rep(dota[i],n)),9)))*z) return(res) } if (is(y,"matrix")) 
## kernelPol for the Bessel, ANOVA, spline, polynomial and tanh kernels.
## Each computes the weighted form z_i * k(x_i, y_j) * k_j (z applied on
## both sides when y is NULL). The Bessel method substitutes the analytic
## limit `lim` where the scaled distance xx ~ 0; the ANOVA method
## broadcasts each row into `a`/`b` and uses colSums per dimension; the
## spline method delegates to kernelMatrix and scales the result with
## z*t(res*z) / k*t(res*z); the polynomial and tanh methods apply the same
## scaling pattern to the closed-form kernel matrices.
{ if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m)#2*sigma or sigma res[,i]<- k[i,]*(exp(-sigma*sqrt(-round(2*(res[,i] - dota - rep(dotb[i],n)),9)))*z) return(res) } } setMethod("kernelPol",signature(kernel="laplacekernel"),kernelPol.laplacekernel) kernelPol.besselkernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) nu <- kpar(kernel)$order ni <- kpar(kernel)$degree n <- dim(x)[1] lim <- 1/(gamma(nu + 1)*2^nu) dota <- rowSums(x*x)/2 z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") res <- crossprod(t(x)) for (i in 1:n) { xx <- sigma*sqrt(-round(2*(res[i,] - dota - rep(dota[i],n)),9)) res[i,] <- besselJ(xx,nu)*(xx^(-nu)) res[i,which(xx < 10e-5)] <- lim res[i,] <- z[i,]*(((res[i,]/lim)^ni)*z) } return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] if(!dim(k)[1]==m) stop("k must have equal rows to y") k <- as.matrix(k) if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m){#2*sigma or sigma xx <- sigma*sqrt(-round(2*(res[,i] - dota - rep(dotb[i],n)),9)) res[,i] <- besselJ(xx,nu)*(xx^(-nu)) res[which(xx<10e-5),i] <- lim res[,i]<- k[i,]*(((res[,i]/lim)^ni)*z) } return(res) } } setMethod("kernelPol",signature(kernel="besselkernel"),kernelPol.besselkernel) kernelPol.anovakernel 
<- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) n <- dim(x)[1] z <- as.matrix(z) if(!dim(z)[1]==n) stop("z must have the length equal to x colums") if (is.null(y)) { if(is(z,"matrix")&&!dim(z)[1]==n) stop("z must have size equal to x colums") a <- matrix(0, dim(x)[2], n) res <- matrix(0,n,n) for (i in 1:n) { a[rep(TRUE,dim(x)[2]), rep(TRUE,n)] <- x[i,] res[i,]<- z[i,]*((colSums(exp( - sigma*(a - t(x))^2))^degree)*z) } return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.matrix(k) if(!dim(k)[1]==m) stop("k must have equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") b <- matrix(0, dim(x)[2],m) res <- matrix(0, dim(x)[1],m) for( i in 1:n) { b[rep(TRUE,dim(x)[2]), rep(TRUE,m)] <- x[i,] res[i,] <- z[i,]*((colSums(exp( - sigma*(b - t(y))^2))^degree)*k) } return(res) } } setMethod("kernelPol",signature(kernel="anovakernel"),kernelPol.anovakernel) kernelPol.splinekernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) sigma <- kpar(kernel)$sigma degree <- kpar(kernel)$degree n <- dim(x)[1] z <- as.vector(z) if(!(length(z)==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- kernelMatrix(kernel,x) return(unclass(z*t(res*z))) } if (is(y,"matrix")) { if(is.null(k)) stop("k 
not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal to rows of y") res <- kernelMatrix(kernel,x,y) return(unclass(k*t(res*z))) } } setMethod("kernelPol",signature(kernel="splinekernel"),kernelPol.splinekernel) kernelPol.polykernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) degree <- kpar(kernel)$degree scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(((scale*crossprod(t(x))+offset)^degree)*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal to rows of y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes must have the same number of columns") res<- k*t(((scale*x%*%t(y) + offset)^degree)*z) return(res) } } setMethod("kernelPol",signature(kernel="polykernel"),kernelPol.polykernel) kernelPol.tanhkernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) scale <- kpar(kernel)$scale offset <- kpar(kernel)$offset n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(tanh(scale*crossprod(t(x))+offset)*z) return(res) 
## kernelPol for the tanh (tail), vanilla and string kernels, followed by
## kernelFast: kernelFast recomputes kernelMatrix but accepts precomputed
## row norms `a` so iterative algorithms avoid redoing rowSums(x*x).
## FIX(review): the non-sequence string branch called
## paste(x,"\n",seq=""); `seq` is not a paste() argument, so it fell into
## `...` and the terms were joined with the default " " separator,
## producing "x \n " instead of "x\n" -- corrected to sep="", matching
## kernelMatrix.stringkernel and kernelMult.stringkernel. The y-branch
## .Call("stringtv", ...) also passed as.integer(length(x)) where the
## number of y strings is required -- corrected to length(y), matching the
## other string-kernel methods.
## NOTE(review): in the same branch the quadratic forms are built as
## k*ret*k (y NULL, where k defaults to NULL) and k*ret*z, relying on
## elementwise recycling that only lines up when length(x)==length(y);
## flagged but left as upstream behavior pending confirmation.
} if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!(length(k)==m)) stop("k must have length equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes x, y must have the same number of columns") res<- k*t(tanh(scale*x%*%t(y) + offset)*z) return(res) } } setMethod("kernelPol",signature(kernel="tanhkernel"),kernelPol.tanhkernel) kernelPol.vanillakernel <- function(kernel, x, y=NULL, z, k=NULL) { if(!is(y,"matrix")&&!is.null(y)&&!is(y,"vector")) stop("y must be a matrix, vector or NULL") if(!is(z,"matrix")&&!is(z,"vector")) stop("z must be a matrix or a vector") if(!is(k,"matrix")&&!is(k,"vector")&&!is.null(k)) stop("k must be a matrix or a vector") n <- dim(x)[1] if(is(z,"matrix")) { z <- as.vector(z) } m <- length(z) if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!(m==n)) stop("z must have the length equal to x colums") if (is.null(y)) { res <- z*t(crossprod(t(x))*z) return(res) } if (is(y,"matrix")) { if(is.null(k)) stop("k not specified!") m <- dim(y)[1] k <- as.vector(k) if(!length(k)==m) stop("k must have length equal rows to y") if(!dim(x)[2]==dim(y)[2]) stop("matrixes x, y must have the same number of columns") for( i in 1:m) res<- k*t(x%*%t(y)*z) return(res) } } setMethod("kernelPol",signature(kernel="vanillakernel"),kernelPol.vanillakernel) kernelPol.stringkernel <- function(kernel, x, y=NULL ,z ,k=NULL) { n <- length(x) res1 <- matrix(rep(0,n*n), ncol = n) resdiag <- rep(0,n) if(is(x,"list")) x <- sapply(x,paste,collapse="") if(is(y,"list")) y <- sapply(y,paste,collapse="") normalized = kpar(kernel)$normalized if(normalized == TRUE) kernel <- stringdot(length = kpar(kernel)$length, type = kpar(kernel)$type, lambda = kpar(kernel)$lambda, normalized = FALSE) z <- as.matrix(z) ## y is null if (kpar(kernel)$type == "sequence" |kpar(kernel)$type == "string"|kpar(kernel)$type == "fullstring") { if(is.null(y)){ if(normalized == TRUE){ ## calculate diagonal elements first, and use them 
to normalize for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- (z[i,]*kernel(x[[i]],x[[j]])*z[j,])/sqrt(resdiag[i]*resdiag[j]) } } res1 <- res1 + t(res1) diag(res1) <- z^2 } else { for (i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:n) { for(j in (i:n)[-1]) { res1[i,j] <- (z[i,]*kernel(x[[i]],x[[j]])*z[j,]) } } res1 <- res1 + t(res1) diag(res1) <- resdiag * z^2 } } if (!is.null(y)){ if(normalized == TRUE){ m <- length(y) res1 <- matrix(0,n,m) resdiag1 <- rep(0,m) k <- as.matrix(k) for(i in 1:n) resdiag[i] <- kernel(x[[i]],x[[i]]) for(i in 1:m) resdiag1[i] <- kernel(y[[i]],y[[i]]) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- (z[i,]*kernel(x[[i]],y[[j]])*k[j,])/sqrt(resdiag[i]*resdiag1[j]) } } } } else{ m <- length(y) res1 <- matrix(0,n,m) k <- as.matrix(k) for(i in 1:n) { for(j in 1:m) { res1[i,j] <- (z[i,]*kernel(x[[i]],y[[j]])*k[j,]) } } } } else { switch(kpar(kernel)$type, "exponential" = sktype <- 2, "constant" = sktype <- 1, "spectrum" = sktype <- 3, "boundrange" = sktype <- 4) if(is(x,"list")) x <- unlist(x) if(is(y,"list")) y <- unlist(y) x <- paste(x,"\n",sep="") if(!is.null(y)) y <- paste(y,"\n",sep="") if(is.null(y)) ret <- matrix(0, length(x),length(x)) else ret <- matrix(0,length(x),length(y)) if(is.null(y)){ for( i in 1:length(x)) ret[i,] <- .Call("stringtv",as.character(x[i]),as.character(x),as.integer(length(x)),as.integer(nchar(x[i])),as.integer(nchar(x)),as.integer(sktype),as.double(kpar(kernel)$lambda)) res1 <- k*ret*k } else{ for( i in 1:length(x)) ret[i,] <- .Call("stringtv",as.character(x[i]),as.character(y),as.integer(length(y)),as.integer(nchar(x[i])),as.integer(nchar(y)),as.integer(sktype),as.double(kpar(kernel)$lambda)) res1 <- k*ret*z } if(normalized == TRUE){ if(is.null(y)){ ret <- t((1/sqrt(diag(ret)))*t(ret*(1/sqrt(diag(ret))))) res1 <- k*ret*k } else{ norm1 <- rep(0,length(x)) norm2 <- rep(0,length(y)) for( i in 1:length(x)) norm1[i] <- 
.Call("stringtv",as.character(x[i]),as.character(x[i]),as.integer(1),as.integer(nchar(x[i])),as.integer(nchar(x[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) for( i in 1:length(y)) norm2[i] <- .Call("stringtv",as.character(y[i]),as.character(y[i]),as.integer(1),as.integer(nchar(y[i])),as.integer(nchar(y[i])),as.integer(sktype),as.double(kpar(kernel)$lambda)) ret <- t((1/sqrt(norm2))*t(ret*(1/sqrt(norm1)))) res1 <- k*ret*z } } } return(res1) } setMethod("kernelPol",signature(kernel="stringkernel"),kernelPol.stringkernel) ## kernelFast returns the kernel matrix, its usefull in algorithms ## which require iterative kernel matrix computations kernelFast <- function(kernel, x, y, a) { return(kernelMatrix(kernel,x,y)) } setGeneric("kernelFast",function(kernel, x, y, a) standardGeneric("kernelFast")) kernelFast.rbfkernel <- function(kernel, x, y, a) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- a/2 if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m) res[,i]<- exp(2*sigma*(res[,i] - dota - rep(dotb[i],n))) return(res) } } setMethod("kernelFast",signature(kernel="rbfkernel"),kernelFast.rbfkernel) kernelFast.laplacekernel <- function(kernel, x, y, a) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- a/2 if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m) res[,i]<- exp(-sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9))) return(res) } } setMethod("kernelFast",signature(kernel="laplacekernel"),kernelFast.laplacekernel) 
kernelFast.besselkernel <- function(kernel, x, y, a) { if(is(x,"vector")) x <- as.matrix(x) if(is(y,"vector")) y <- as.matrix(y) if(!is(y,"matrix")) stop("y must be a matrix or a vector") sigma = kpar(kernel)$sigma nu = kpar(kernel)$order ni = kpar(kernel)$degree n <- dim(x)[1] lim <- 1/(gamma(nu+1)*2^(nu)) dota <- a/2 if (is(x,"matrix") && is(y,"matrix")){ if (!(dim(x)[2]==dim(y)[2])) stop("matrixes must have the same number of columns") m <- dim(y)[1] dotb <- rowSums(y*y)/2 res <- x%*%t(y) for( i in 1:m){ xx <- sigma*sqrt(round(-2*(res[,i] - dota - rep(dotb[i],n)),9)) res[,i] <- besselJ(xx,nu)*(xx^(-nu)) res[which(xx<10e-5),i] <- lim } return((res/lim)^ni) } } setMethod("kernelFast",signature(kernel="besselkernel"),kernelFast.besselkernel) kernelFast.anovakernel <- function(kernel, x, y, a) { return(kernelMatrix(kernel,x,y)) } setMethod("kernelFast",signature(kernel="anovakernel"),kernelFast.anovakernel) kernelFast.polykernel <- function(kernel, x, y, a) { return(kernelMatrix(kernel,x,y)) } setMethod("kernelFast",signature(kernel="polykernel"),kernelFast.polykernel) kernelFast.vanilla <- function(kernel, x, y, a) { return(kernelMatrix(kernel,x,y)) } setMethod("kernelFast",signature(kernel="vanillakernel"),kernelFast.vanilla) kernelFast.tanhkernel <- function(kernel, x, y, a) { return(kernelMatrix(kernel,x,y)) } setMethod("kernelFast",signature(kernel="tanhkernel"),kernelFast.tanhkernel) kernelFast.stringkernel <- function(kernel, x, y, a) { return(kernelMatrix(kernel,x,y)) } setMethod("kernelFast",signature(kernel="stringkernel"),kernelFast.stringkernel) kernelFast.splinekernel <- function(kernel, x, y, a) { return(kernelMatrix(kernel,x,y)) } setMethod("kernelFast",signature(kernel="splinekernel"),kernelFast.splinekernel) kernlab/R/ranking.R0000644000175100001440000002203011304023134013647 0ustar hornikusers## manifold ranking ## author: alexandros setGeneric("ranking",function(x, ...) 
standardGeneric("ranking")) setMethod("ranking",signature(x="matrix"), function (x, y, kernel = "rbfdot", kpar = list(sigma = 1), scale = FALSE, alpha = 0.99, iterations = 600, edgegraph = FALSE, convergence = FALSE, ...) { m <- dim(x)[1] d <- dim(x)[2] if(length(y) != m) { ym <- matrix(0,m,1) ym[y] <- 1 y <- ym } if (is.null(y)) y <- matrix(1, m, 1) labelled <- y != 0 if (!any(labelled)) stop("no labels sublied") if(is.character(kernel)) kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","besseldot","laplacedot")) if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(scale) x <- scale(x) ## scaling from ksvm ## normalize ? if (is(kernel)[1]=='rbfkernel' && edgegraph){ sigma = kpar(kernel)$sigma n <- dim(x)[1] dota <- rowSums(x*x)/2 sed <- crossprod(t(x)) for (i in 1:n) sed[i,] <- - 2*(sed[i,] - dota - rep(dota[i],n)) diag(sed) <- 0 K <- exp(- sigma * sed) mst <- minimum.spanning.tree(sed) algo.mst <- mst$E max.squared.edge.length <- mst$max.sed.in.tree edgegraph <- (sed <= max.squared.edge.length) K[!edgegraph] <- 0 ##algo.edge.graph <- sparse(algo.edge.graph) rm(sed) gc() } else { edgegraph <- matrix() K <- kernelMatrix(kernel,x) } if (edgegraph && is(kernel)[1]!="rbfkernel"){ warning('edge graph is only implemented for use with the RBF kernel') edgegraph <- matrix() } diag(K) <- 0 ##K <- sparse(K) cs <- colSums(K) ##cs[cs <= 10e-6] <- 1 D <- 1/sqrt(cs) K <- D * K %*% diag(D) if(sum(labelled)==1) y <- K[, labelled,drop = FALSE] else y <- as.matrix(colSums(K[, labelled])) K <- alpha * K[, !labelled] ym <- matrix(0,m,iterations) ym[,1] <- y for (iteration in 2:iterations) ym[, iteration] <- ym[, 1] + K %*% ym[!labelled, iteration-1] ym[labelled,] <- NA r <- ym r[!labelled,] <- compute.ranks(-r[!labelled, ]) if(convergence) convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled)) else 
convergence <- matrix() res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations]) return(new("ranking", .Data=res, convergence = convergence, edgegraph = edgegraph)) }) ## kernelMatrix interface setMethod("ranking",signature(x="kernelMatrix"), function (x, y, alpha = 0.99, iterations = 600, convergence = FALSE, ...) { m <- dim(x)[1] if(length(y) != m) { ym <- matrix(0,m,1) ym[y] <- 1 y <- ym } if (is.null(y)) y <- matrix(1, m, 1) labelled <- y != 0 if (!any(labelled)) stop("no labels sublied") diag(x) <- 0 ##K <- sparse(K) cs <- colSums(x) ##cs[cs <= 10e-6] <- 1 D <- 1/sqrt(cs) x <- D * x %*% diag(D) if(sum(labelled)==1) y <- x[, labelled,drop = FALSE] else y <- as.matrix(colSums(x[, labelled])) x <- alpha * x[, !labelled] ym <- matrix(0,m,iterations) ym[,1] <- y for (iteration in 2:iterations) ym[, iteration] <- ym[, 1] + x %*% ym[!labelled, iteration-1] ym[labelled,] <- NA r <- ym r[!labelled,] <- compute.ranks(-r[!labelled, ]) if(convergence) convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled)) else convergence <- matrix() res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations]) return(new("ranking", .Data=res, convergence = convergence)) }) ## list interface setMethod("ranking",signature(x="list"), function (x, y, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 0.99, iterations = 600, convergence = FALSE, ...) 
{ m <- length(x) if(length(y) != m) { ym <- matrix(0,m,1) ym[y] <- 1 y <- ym } if (is.null(y)) y <- matrix(1, m, 1) labelled <- y != 0 if (!any(labelled)) stop("no labels sublied") if(is.character(kernel)) kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","besseldot","laplacedot")) if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") edgegraph <- matrix() K <- kernelMatrix(kernel,x) diag(K) <- 0 ##K <- sparse(K) cs <- colSums(K) ##cs[cs <= 10e-6] <- 1 D <- 1/sqrt(cs) K <- D * K %*% diag(D) if(sum(labelled)==1) y <- K[, labelled,drop = FALSE] else y <- as.matrix(colSums(K[, labelled])) K <- alpha * K[, !labelled] ym <- matrix(0,m,iterations) ym[,1] <- y for (iteration in 2:iterations) ym[, iteration] <- ym[, 1] + K %*% ym[!labelled, iteration-1] ym[labelled,] <- NA r <- ym r[!labelled,] <- compute.ranks(-r[!labelled, ]) if(convergence) convergence <- (r - rep(r[,dim(r)[2]],iterations))/(m-sum(labelled)) else convergence <- matrix() res <- cbind(t(t(1:m)), ym[,iterations], r[,iterations]) return(new("ranking", .Data=res, convergence = convergence, edgegraph = NULL)) }) minimum.spanning.tree <- function(sed) { max.sed.in.tree <- 0 E <- matrix(0,dim(sed)[1],dim(sed)[2]) n <- dim(E)[1] C <- logical(n) cmp <- sed diag(cmp) <- NA ans <- min(cmp, na.rm = TRUE) i <- which.min(cmp) j <- i%/%n + 1 i <- i%%n +1 for (nC in 1:n) { cmp <- sed cmp[C,] <- NA cmp[,!C] <- NA if(nC == 1) { ans <- 1 i <- 1 } else{ ans <- min(cmp, na.rm=TRUE) i <- which.min(cmp)} j <- i%/%n + 1 i <- i%%n + 1 E[i, j] <- nC E[j, i] <- nC C[i] <- TRUE max.sed.in.tree <- max(max.sed.in.tree, sed[i, j]) } ## E <- sparse(E) res <- list(E=E, max.sed.in.tree=max.sed.in.tree) } compute.ranks <- function(am) { rm <- matrix(0,dim(am)[1],dim(am)[2]) for (j in 1:dim(am)[2]) { a <- am[, j] sort <- sort(a, index.return = TRUE) sorted <- sort$x r <- 
sort$ix r[r] <- 1:length(r) while(1) { if(sum(na.omit(diff(sorted) == 0)) == 0) break tied <- sorted[min(which(diff(sorted) == 0))] sorted[sorted==tied] <- NA r[a==tied] <- mean(r[a==tied]) } rm[, j] <- r } return(rm) } setMethod("show","ranking", function(object) { cat("Ranking object of class \"ranking\"","\n") cat("\n") show(object@.Data) cat("\n") if(!any(is.na(convergence(object)))) cat("convergence matrix included.","\n") if(!any(is.na(edgegraph(object)))) cat("edgegraph matrix included.","\n") }) kernlab/R/specc.R0000644000175100001440000002543412676465043013354 0ustar hornikusers## Spectral clustering ## author : alexandros setGeneric("specc",function(x, ...) standardGeneric("specc")) setMethod("specc", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) na.act <- attr(mf, "na.action") x <- model.matrix(mt, mf) res <- specc(x, ...) cl[[1]] <- as.name("specc") if(!is.null(na.act)) n.action(res) <- na.action return(res) }) setMethod("specc",signature(x="matrix"),function(x, centers, kernel = "rbfdot", kpar = "automatic", nystrom.red = FALSE, nystrom.sample = dim(x)[1]/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) 
{ x <- na.action(x) rown <- rownames(x) x <- as.matrix(x) m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(is.character(kpar)) { kpar <- match.arg(kpar,c("automatic","local")) if(kpar == "automatic") { if (nystrom.red == TRUE) sam <- sample(1:m, floor(mod.sample*nystrom.sample)) else sam <- sample(1:m, floor(mod.sample*m)) sx <- unique(x[sam,]) ns <- dim(sx)[1] dota <- rowSums(sx*sx)/2 ktmp <- crossprod(t(sx)) for (i in 1:ns) ktmp[i,]<- 2*(-ktmp[i,] + dota + rep(dota[i], ns)) ## fix numerical prob. ktmp[ktmp<0] <- 0 ktmp <- sqrt(ktmp) kmax <- max(ktmp) kmin <- min(ktmp + diag(rep(Inf,dim(ktmp)[1]))) kmea <- mean(ktmp) lsmin <- log2(kmin) lsmax <- log2(kmax) midmax <- min(c(2*kmea, kmax)) midmin <- max(c(kmea/2,kmin)) rtmp <- c(seq(midmin,0.9*kmea,0.05*kmea), seq(kmea,midmax,0.08*kmea)) if ((lsmax - (Re(log2(midmax))+0.5)) < 0.5) step <- (lsmax - (Re(log2(midmax))+0.5)) else step <- 0.5 if (((Re(log2(midmin))-0.5)-lsmin) < 0.5 ) stepm <- ((Re(log2(midmin))-0.5) - lsmin) else stepm <- 0.5 tmpsig <- c(2^(seq(lsmin,(Re(log2(midmin))-0.5), stepm)), rtmp, 2^(seq(Re(log2(midmax))+0.5, lsmax,step))) diss <- matrix(rep(Inf,length(tmpsig)*nc),ncol=nc) for (i in 1:length(tmpsig)){ ka <- exp((-(ktmp^2))/(2*(tmpsig[i]^2))) diag(ka) <- 0 d <- 1/sqrt(rowSums(ka)) if(!any(d==Inf) && !any(is.na(d))&& (max(d)[1]-min(d)[1] < 10^4)) { l <- d * ka %*% diag(d) xi <- eigen(l,symmetric=TRUE)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) diss[i,] <- res$withinss } } ms <- which.min(rowSums(diss)) kernel <- rbfdot((tmpsig[ms]^(-2))/2) ## Compute Affinity Matrix if (nystrom.red == FALSE) km <- kernelMatrix(kernel, x) } if (kpar=="local") { if (nystrom.red == TRUE) stop ("Local Scaling not supported for nystrom reduction.") s <- rep(0,m) dota <- rowSums(x*x)/2 dis <- crossprod(t(x)) for (i 
in 1:m) dis[i,]<- 2*(-dis[i,] + dota + rep(dota[i],m)) ## fix numerical prob. dis[dis < 0] <- 0 for (i in 1:m) s[i] <- median(sort(sqrt(dis[i,]))[1:5]) ## Compute Affinity Matrix km <- exp(-dis / s%*%t(s)) kernel <- "Localy scaled RBF kernel" } } else { if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## Compute Affinity Matrix if (nystrom.red == FALSE) km <- kernelMatrix(kernel, x) } if (nystrom.red == TRUE){ n <- floor(nystrom.sample) ind <- sample(1:m, m) x <- x[ind,] tmps <- sort(ind, index.return = TRUE) reind <- tmps$ix A <- kernelMatrix(kernel, x[1:n,]) B <- kernelMatrix(kernel, x[-(1:n),], x[1:n,]) d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi[reind,], centers, iterations) } else{ if(is(kernel)[1] == "rbfkernel") diag(km) <- 0 d <- 1/sqrt(rowSums(km)) l <- d * km %*% diag(d) xi <- eigen(l)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) } cent <- matrix(unlist(lapply(1:nc,ll<- function(l){colMeans(x[which(res$cluster==l), ,drop=FALSE])})),ncol=dim(x)[2], byrow=TRUE) withss <- unlist(lapply(1:nc,ll<- function(l){sum((x[which(res$cluster==l),, drop=FALSE] - cent[l,])^2)})) names(res$cluster) <- rown return(new("specc", .Data=res$cluster, size = res$size, centers=cent, withinss=withss, kernelf= kernel)) }) setMethod("specc",signature(x="list"),function(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), 
nystrom.red = FALSE, nystrom.sample = length(x)/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) { x <- na.action(x) m <- length(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if (nystrom.red == TRUE){ n <- nystrom.sample ind <- sample(1:m, m) x <- x[ind,] tmps <- sort(ind, index.return = TRUE) reind <- tmps$ix A <- kernelMatrix(kernel, x[1:n,]) B <- kernelMatrix(kernel, x[-(1:n),], x[1:n,]) d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi[reind,], centers, iterations) } else{ ## Compute Affinity Matrix / in our case just the kernel matrix km <- kernelMatrix(kernel, x) if(is(kernel)[1] == "rbfkernel") diag(km) <- 0 d <- 1/sqrt(rowSums(km)) l <- d * km %*% diag(d) xi <- eigen(l)$vectors[,1:nc] sqxi <- rowSums(xi^2) if(any(sqxi==0)) stop("Zero eigenvector elements, try using a lower value for the length hyper-parameter") yi <- xi/sqrt(sqxi) res <- kmeans(yi, centers, iterations) } return(new("specc", .Data=res$cluster, size = res$size, kernelf= kernel)) }) setMethod("specc",signature(x="kernelMatrix"),function(x, centers, nystrom.red = FALSE, iterations = 200, ...) 
{ m <- nrow(x) if (missing(centers)) stop("centers must be a number or a matrix") if (length(centers) == 1) { nc <- centers if (m < centers) stop("more cluster centers than data points.") } else nc <- dim(centers)[2] if(dim(x)[1]!=dim(x)[2]) { nystrom.red <- TRUE if(dim(x)[1] < dim(x)[2]) x <- t(x) m <- nrow(x) n <- ncol(x) } if (nystrom.red == TRUE){ A <- x[1:n,] B <- x[-(1:n),] d1 <- colSums(rbind(A,B)) d2 <- rowSums(B) + drop(matrix(colSums(B),1) %*% .ginv(A)%*%t(B)) dhat <- sqrt(1/c(d1,d2)) A <- A * (dhat[1:n] %*% t(dhat[1:n])) B <- B * (dhat[(n+1):m] %*% t(dhat[1:n])) Asi <- .sqrtm(.ginv(A)) Q <- A + Asi %*% crossprod(B) %*% Asi tmpres <- svd(Q) U <- tmpres$u L <- tmpres$d V <- rbind(A,B) %*% Asi %*% U %*% .ginv(sqrt(diag(L))) yi <- matrix(0,m,nc) ## for(i in 2:(nc +1)) ## yi[,i-1] <- V[,i]/V[,1] for(i in 1:nc) ## specc yi[,i] <- V[,i]/sqrt(sum(V[,i]^2)) res <- kmeans(yi, centers, iterations) } else{ d <- 1/sqrt(rowSums(x)) l <- d * x %*% diag(d) xi <- eigen(l)$vectors[,1:nc] yi <- xi/sqrt(rowSums(xi^2)) res <- kmeans(yi, centers, iterations) } ## cent <- matrix(unlist(lapply(1:nc,ll<- function(l){colMeans(x[which(res$cluster==l),])})),ncol=dim(x)[2], byrow=TRUE) ## withss <- unlist(lapply(1:nc,ll<- function(l){sum((x[which(res$cluster==l),] - cent[l,])^2)})) return(new("specc", .Data=res$cluster, size = res$size, centers = matrix(0), withinss = c(0), kernelf= "Kernel Matrix used as input.")) }) setMethod("show","specc", function(object){ cat("Spectral Clustering object of class \"specc\"","\n") cat("\n","Cluster memberships:","\n","\n") cat(object@.Data,"\n","\n") show(kernelf(object)) cat("\n") if(!any(is.na(centers(object)))){ cat(paste("Centers: ","\n")) show(centers(object)) cat("\n")} cat(paste("Cluster size: ","\n")) show(size(object)) cat("\n") if(!is.logical(withinss(object))){ cat(paste("Within-cluster sum of squares: ", "\n")) show(withinss(object)) cat("\n")} }) .ginv <- function (X, tol = sqrt(.Machine$double.eps)) { if (length(dim(X)) > 2 || 
!(is.numeric(X) || is.complex(X))) stop("'X' must be a numeric or complex matrix") if (!is.matrix(X)) X <- as.matrix(X) Xsvd <- svd(X) if (is.complex(X)) Xsvd$u <- Conj(Xsvd$u) Positive <- Xsvd$d > max(tol * Xsvd$d[1], 0) if (all(Positive)) Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u)) else if (!any(Positive)) array(0, dim(X)[2:1]) else Xsvd$v[, Positive, drop = FALSE] %*% ((1/Xsvd$d[Positive]) * t(Xsvd$u[, Positive, drop = FALSE])) } .sqrtm <- function(x) { tmpres <- eigen(x) V <- t(tmpres$vectors) D <- tmpres$values if(is.complex(D)) D <- Re(D) D <- pmax(D,0) return(crossprod(V*sqrt(D),V)) } kernlab/R/kfa.R0000644000175100001440000001020212676464656013014 0ustar hornikusers ## This code takes the set x of vectors from the input space ## and does projection pursuit to find a good basis for x. ## ## The algorithm is described in Section 14.5 of ## Learning with Kernels by B. Schoelkopf and A. Smola, entitled ## Kernel Feature Analysis. ## ## created : 17.09.04 alexandros ## updated : setGeneric("kfa",function(x, ...) standardGeneric("kfa")) setMethod("kfa", signature(x = "formula"), function(x, data = NULL, na.action = na.omit, ...) { mt <- terms(x, data = data) if(attr(mt, "response") > 0) stop("response not allowed in formula") attr(mt, "intercept") <- 0 cl <- match.call() mf <- match.call(expand.dots = FALSE) mf$formula <- mf$x mf$... <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval(mf, parent.frame()) Terms <- attr(mf, "terms") na.act <- attr(mf, "na.action") x <- model.matrix(mt, mf) res <- kfa(x, ...) 
## fix up call to refer to the generic, but leave arg name as `formula' cl[[1]] <- as.name("kfa") kcall(res) <- cl attr(Terms,"intercept") <- 0 terms(res) <- Terms if(!is.null(na.act)) n.action(res) <- na.act return(res) }) setMethod("kfa",signature(x="matrix"), function(x, kernel="rbfdot", kpar=list(sigma=0.1), features = 0, subset = 59, normalize = TRUE, na.action = na.omit) { if(!is.matrix(x)) stop("x must be a matrix") x <- na.action(x) if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") ## initialize variables m <- dim(x)[1] if(subset > m) subset <- m if (features==0) features <- subset alpha <- matrix(0,subset,features) alphazero <- rep(1,subset) alphafeat <- matrix(0,features,features) idx <- -(1:subset) randomindex <- sample(1:m, subset) K <- kernelMatrix(kernel,x[randomindex,,drop=FALSE],x) ## main loop for (i in 1:features) { K.cols <- K[-idx, , drop = FALSE] if(i > 1) projections <- K.cols * (alphazero[-idx]%*%t(rep(1,m))) + crossprod(t(alpha[-idx,1:(i-1),drop=FALSE]),K[idx, ,drop = FALSE]) else projections <- K.cols * (alphazero%*%t(rep(1,m))) Q <- apply(projections, 1, sd) Q.tmp <- rep(0,subset) Q.tmp[-idx] <- Q Qidx <- which.max(Q.tmp) Qmax <- Q.tmp[Qidx] if(i > 1) alphafeat[i,1:(i-1)] <- alpha[Qidx,1:(i-1)] alphafeat[i,i] <- alphazero[Qidx] if (i > 1) idx <- c(idx,Qidx) else idx <- Qidx if (i > 1) Qfeat <- c(Qfeat, Qmax) else Qfeat <- Qmax Ksub <- K[idx, idx, drop = FALSE] alphasub <- alphafeat[i,1:i] phisquare <- alphasub %*% Ksub %*% t(t(alphasub)) dotprod <- (alphazero * (K[,idx, drop = FALSE] %*% t(t(alphasub))) + alpha[,1:i]%*%(Ksub%*%t(t(alphasub))))/drop(phisquare) alpha[,1:i] <- alpha[,1:i] - dotprod %*%alphasub if(normalize){ sumalpha <- alphazero + rowSums(abs(alpha)) alphazero <- alphazero / sumalpha alpha <- alpha/ (sumalpha %*% t(rep(1,features))) } } obj <- new("kfa") alpha(obj) <- alphafeat 
alphaindex(obj) <- randomindex[idx] xmatrix(obj) <- x[alphaindex(obj),] kernelf(obj) <- kernel kcall(obj) <- match.call() return(obj) }) ## project a new matrix into the feature space setMethod("predict",signature(object="kfa"), function(object , x) { if (!is.null(terms(object))) { if(!is.matrix(x)) x <- model.matrix(delete.response(terms(object)), as.data.frame(x), na.action = n.action(object)) } else x <- if (is.vector(x)) t(t(x)) else as.matrix(x) if (!is.matrix(x)) stop("x must be a matrix a vector or a data frame") tmpres <- kernelMult(kernelf(object), x, xmatrix(object), alpha(object)) return(tmpres - matrix(colSums(tmpres)/dim(tmpres)[1],dim(tmpres)[1],dim(tmpres)[2],byrow=TRUE)) }) setMethod("show",signature(object="kfa"), function(object) { cat(paste("Number of features :",dim(alpha(object))[2],"\n")) show(kernelf(object)) }) kernlab/R/sigest.R0000644000175100001440000000465612676465031013555 0ustar hornikusers## sigma estimation for RBF kernels ## author: alexandros setGeneric("sigest", function(x, ...) standardGeneric("sigest")) setMethod("sigest",signature(x="formula"), function (x, data=NULL, frac = 0.5, na.action = na.omit, scaled = TRUE){ call <- match.call() m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) ## m$... 
<- NULL m$formula <- m$x m$x <- NULL m$scaled <- NULL m$frac <- NULL m[[1L]] <- quote(stats::model.frame) m <- eval(m, parent.frame()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 x <- model.matrix(Terms, m) if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))), which(!scaled) ) ) scaled <- !attr(x, "assign") %in% remove } ret <- sigest(x, scaled = scaled, frac = frac, na.action = na.action) return (ret) }) setMethod("sigest",signature(x="matrix"), function (x, frac = 0.5, scaled = TRUE, na.action = na.omit) { x <- na.action(x) if (length(scaled) == 1) scaled <- rep(scaled, ncol(x)) if (any(scaled)) { co <- !apply(x[,scaled, drop = FALSE], 2, var) if (any(co)) { scaled <- rep(FALSE, ncol(x)) warning(paste("Variable(s)", paste("`",colnames(x[,scaled, drop = FALSE])[co], "'", sep="", collapse=" and "), "constant. Cannot scale data.") ) } else { xtmp <- scale(x[,scaled]) x[,scaled] <- xtmp } } m <- dim(x)[1] n <- floor(frac*m) index <- sample(1:m, n, replace = TRUE) index2 <- sample(1:m, n, replace = TRUE) temp <- x[index,, drop=FALSE] - x[index2,,drop=FALSE] dist <- rowSums(temp^2) srange <- 1/quantile(dist[dist!=0],probs=c(0.9,0.5,0.1)) ## ds <- sort(dist[dist!=0]) ## sl <- ds[ceiling(0.2*length(ds))] ## su <- ds[ceiling(0.8*length(ds))] ## srange <- c(1/su,1/median(ds), 1/sl) ## names(srange) <- NULL return(srange) }) kernlab/R/kernelmatrix.R0000644000175100001440000000050311304023134014724 0ustar hornikusers setGeneric("as.kernelMatrix",function(x, center = FALSE) standardGeneric("as.kernelMatrix")) setMethod("as.kernelMatrix", signature(x = "matrix"), function(x, center = FALSE) { if(center){ m <- dim(x)[1] x <- t(t(x - colSums(x)/m) - rowSums(x)/m) + sum(x)/m^2 } return(new("kernelMatrix",.Data = x)) }) kernlab/R/couplers.R0000644000175100001440000000770211304023134014063 0ustar hornikusers## wrapper function for couplers ## author : alexandros karatzoglou 
couple <- function(probin, coupler = "minpair") { if(is.vector(probin)) probin <- matrix(probin,1) m <- dim(probin)[1] coupler <- match.arg(coupler, c("minpair", "pkpd", "vote", "ht")) # if(coupler == "ht") # multiprob <- sapply(1:m, function(x) do.call(coupler, list(probin[x ,], clscnt))) # else multiprob <- sapply(1:m, function(x) do.call(coupler, list(probin[x ,]))) return(t(multiprob)) } ht <- function(probin, clscnt, iter=1000) { nclass <- length(clscnt) probim <- matrix(0, nclass, nclass) for(i in 1:nclass) for(j in 1:nclass) if(j>i) { probim[i,j] <- probin[i] probim[j,i] <- 1 - probin[i] } p <- rep(1/nclass,nclass) u <- matrix((1/nclass)/((1/nclass)+(1/nclass)) ,nclass,nclass) iter <- 0 while(TRUE) { iter <- iter + 1 stoperror <- 0 for(i in 1:nclass){ num <- den <- 0 for(j in 1:nclass) { if (j!=i) { num <- num + (clscnt[i] + clscnt[j]) * probim[i,j] den <- den + (clscnt[i] + clscnt[j]) * u[i,j] } } alpha <- num/(den + 1e-308) p[i] <- p[i]*alpha stoperror <- stoperror + (alpha -1)^2 if(0) { sum <- 0 sum <- sum(p) + sum p <- p/sum for(ui in 1:nclass) for(uj in 1:nclass) u[ui, uj] <- p[ui]/(p[ui] + p[uj]) } else { for(j in 1:nclass) if (i!=j) { u[i,j] <- p[i]/(p[i] + p[j]) u[j,i] <- 1 - u[i,j] } } } if(stoperror < 1e-3) break if(iter > 400) { cat("Too many iterations: aborting", probin, iter, stoperror, p) break } } ## normalize prob. p <- p/sum(p) return(p) } minpair <- function(probin) { ## Count number of classes and construct prob. 
matrix nclass <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") probim <- matrix(0, nclass, nclass) probim[upper.tri(probim)] <- probin probim[lower.tri(probim)] <- 1 - probin sum <- colSums(probim^2) Q <- diag(sum) Q[upper.tri(Q)] <- - probin*(1 - probin) Q[lower.tri(Q)] <- - probin*(1 - probin) SQ <- matrix(0,nclass +1, nclass +1) SQ[1:(nclass+1) <= nclass, 1:(nclass+1) <= nclass] <- Q SQ[1:(nclass+1) > nclass, 1:(nclass+1) <= nclass] <- rep(1,nclass) SQ[1:(nclass+1) <= nclass, 1:(nclass+1) > nclass] <- rep(1,nclass) rhs <- rep(0,nclass+1) rhs[nclass + 1] <- 1 p <- solve(SQ,rhs) p <- p[-(nclass+1)]/sum(p[-(nclass+1)]) return(p) } pkpd <- function(probin) { ## Count number of classes and constuct prob. matrix nclass <- k <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") probim <- matrix(0, nclass, nclass) probim[upper.tri(probim)] <- probin probim[lower.tri(probim)] <- 1 - probin probim[probim==0] <- 1e-300 R <- 1/probim diag(R) <- 0 p <- 1/(rowSums(R) - (k-2)) p <- p/sum(p) return(p) } vote<- function(probin) { nclass <- (1+sqrt(1 + 8*length(probin)))/2 if(nclass%%1 != 0) stop("Vector has wrong length only one against one problems supported") votev <- rep(0,nclass) p <- 0 for(i in 1:(nclass-1)) { jj <- i+1 for(j in jj:nclass) { p <- p+1 votev[i][probin[i] >= 0.5] <- votev[i][probin[i] >= 0.5] + 1 votev[j][probin[j] < 0.5] <- votev[j][probin[j] < 0.5] + 1 } } p <- votev/sum(votev) return(p) } kernlab/R/ksvm.R0000644000175100001440000035057412676464765013257 0ustar hornikusers## Support Vector Machines ## author : alexandros karatzoglou ## updated : 08.02.06 setGeneric("ksvm", function(x, ...) 
standardGeneric("ksvm"))

## Formula interface for ksvm().
## Builds the model frame / model matrix from the formula `x` and `data`,
## strips the intercept, determines which columns may be scaled (dummy
## columns generated from factor contrasts are excluded), and delegates
## the actual fitting to the matrix method below.
setMethod("ksvm",signature(x="formula"),
          function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){
            cl <- match.call()
            m <- match.call(expand.dots = FALSE)
            if (is.matrix(eval(m$data, parent.frame())))
              m$data <- as.data.frame(data)
            m$... <- NULL
            m$formula <- m$x
            m$x <- NULL
            m$scaled <- NULL
            m[[1L]] <- quote(stats::model.frame)
            m <- eval(m, parent.frame())
            Terms <- attr(m, "terms")
            attr(Terms, "intercept") <- 0    ## no intercept
            x <- model.matrix(Terms, m)
            y <- model.extract(m, "response")
            if (length(scaled) == 1)
              scaled <- rep(scaled, ncol(x))
            if (any(scaled)) {
              ## never scale columns that came from factor contrasts
              remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))),
                                 which(!scaled)
                                 )
                               )
              scaled <- !attr(x, "assign") %in% remove
            }
            ret <- ksvm(x, y, scaled = scaled, ...)
            kcall(ret) <- cl
            attr(Terms,"intercept") <- 0 ## no intercept
            terms(ret) <- Terms
            if (!is.null(attr(m, "na.action")))
              n.action(ret) <- attr(m, "na.action")
            return (ret)
          })

## Vector interface: promote the vector to a one-column matrix
## (t(t(x)) is an idiom that keeps names) and recurse into the
## matrix method.
setMethod("ksvm",signature(x="vector"),
          function(x, ...)
          {
            x <- t(t(x))
            ret <- ksvm(x, ...)
            return(ret)
          })

## Matrix interface: the main workhorse.
## Dispatches on `type` to one of seven solvers implemented in C
## (.Call("smo_optim") for C-svc/nu-svc/one-svc/eps-svr/nu-svr,
##  .Call("tron_optim") for C-bsvc/spoc-svc/kbb-svc/eps-bsvr),
## handling scaling, NA removal, kernel construction, optional
## cross-validation and optional Platt probability modelling.
setMethod("ksvm",signature(x="matrix"),
function (x, y = NULL, scaled = TRUE, type = NULL, kernel = "rbfdot",
          kpar = "automatic", C = 1, nu = 0.2, epsilon = 0.1,
          prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE,
          cache = 40, tol = 0.001, shrinking = TRUE, ...
          ,subset ,na.action = na.omit)
{
  ## Comment out sparse code, future impl. will be based on "Matrix"
  ## sparse <- inherits(x, "matrix.csr")
  ## if (sparse) {
  ##   if (!require(SparseM))
  ##     stop("Need SparseM package for handling of sparse structures!")
  ## }
  sparse <- FALSE

  ## resolve a kernel given by name; a "matrix" kernel means x already
  ## holds a precomputed (square) kernel matrix, handled by the
  ## kernelMatrix method.
  if(is.character(kernel)){
    kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix"))

    if(kernel == "matrix")
      if(dim(x)[1]==dim(x)[2])
        return(ksvm(as.kernelMatrix(x), y = y, type = type, C = C, nu = nu, epsilon = epsilon, prob.model = prob.model, class.weights = class.weights, cross = cross, fit = fit, cache = cache, tol = tol, shrinking = shrinking, ...))
      else
        stop(" kernel matrix not square!")

    if(is.character(kpar))
      if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" )
        {
          cat (" Setting default kernel parameters ","\n")
          kpar <- list()
        }
  }

  ## subsetting and na-handling for matrices
  ret <- new("ksvm")
  if (!missing(subset)) x <- x[subset,]
  if (is.null(y))
    x <- na.action(x)
  else {
    df <- na.action(data.frame(y, x))
    y <- df[,1]
    x <- as.matrix(df[,-1])
  }
  n.action(ret) <- na.action

  ## default problem type inferred from y: no y -> novelty detection,
  ## factor -> classification, numeric -> regression
  if (is.null(type)) type(ret) <-
    if (is.null(y)) "one-svc"
    else if (is.factor(y)) "C-svc"
    else "eps-svr"
  if(!is.null(type))
  type(ret) <- match.arg(type,c("C-svc",
                                "nu-svc",
                                "kbb-svc",
                                "spoc-svc",
                                "C-bsvc",
                                "one-svc",
                                "eps-svr",
                                "eps-bsvr",
                                "nu-svr"))

  ## ## scaling, subsetting, and NA handling
  ## if (sparse) {
  ##   scale <- rep(FALSE, ncol(x))
  ##   if(!is.null(y)) na.fail(y)
  ##   x <- t(t(x)) ## make shure that col-indices are sorted
  ## }

  x.scale <- y.scale <- NULL
  ## scaling: center/scale the selected columns; for regression types the
  ## response is scaled as well and the parameters kept for back-transform
  if (length(scaled) == 1)
    scaled <- rep(scaled, ncol(x))
  if (any(scaled)) {
    co <- !apply(x[,scaled, drop = FALSE], 2, var)  # TRUE for zero-variance columns
    if (any(co)) {
      scaled <- rep(FALSE, ncol(x))
      warning(paste("Variable(s)",
                    paste("`",colnames(x[,scaled, drop = FALSE])[co],
                          "'", sep="", collapse=" and "),
                    "constant. Cannot scale data.")
              )
    } else {
      xtmp <- scale(x[,scaled])
      x[,scaled] <- xtmp
      x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")]
      if (is.numeric(y)&&(type(ret)!="C-svc"&&type(ret)!="nu-svc"&&type(ret)!="C-bsvc"&&type(ret)!="spoc-svc"&&type(ret)!="kbb-svc")) {
        y <- scale(y)
        y.scale <- attributes(y)[c("scaled:center","scaled:scale")]
        y <- as.vector(y)
      }
    }
  }
  ncols <- ncol(x)
  m <- nrows <- nrow(x)

  ## automatic sigma estimation for RBF / Laplace kernels via sigest()
  ## NOTE(review): class(kernel)=="rbfkernel" comparison — inherits() would
  ## be the robust idiom; confirm before changing.
  if (!is.function(kernel))
    if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){
      kp <- match.arg(kpar,"automatic")
      if(kp=="automatic")
        kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)]))
      #cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n")
    }
  if(!is(kernel,"kernel"))
    {
      if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
      kernel <- do.call(kernel, kpar)
    }

  if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")

  if (!is(y,"vector") && !is.factor (y) & is(y,"matrix") & !(type(ret)=="one-svc")) stop("y must be a vector or a factor.")
  if(!(type(ret)=="one-svc"))
    if(is(y,"vector") | is(y,"factor") ) ym <- length(y)
    else if(is(y,"matrix")) ym <- dim(y)[1]
    else stop("y must be a matrix or a vector")

  if ((type(ret) != "one-svc") && ym != m) stop("x and y don't match.")

  if(nu > 1|| nu <0) stop("nu must be between 0 an 1.")

  weightlabels <- NULL
  nweights <- 0
  weight <- 0
  wl <- 0
  ## in case of classification: transform factors into integers
  if (type(ret) == "one-svc") # one class classification --> set dummy
    y <- 1
  else
    if (is.factor(y)) {
      lev(ret) <- levels (y)
      y <- as.integer (y)
      if (!is.null(class.weights)) {
        weightlabels <- match (names(class.weights),lev(ret))
        if (any(is.na(weightlabels)))
          stop ("At least one level name is missing or misspelled.")
      }
    }
    else {
      if ((type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer (y) != y))
        stop ("dependent variable has to be of factor or integer type for classification mode.")
      ## NOTE(review): this '||' chain is always TRUE (a type cannot equal
      ## all three strings at once); '&&' was probably intended — confirm.
      if (type(ret) != "eps-svr" || type(ret) != "nu-svr"|| type(ret)!="eps-bsvr")
        lev(ret) <- sort(unique (y))
    }
  ## initialize
  nclass(ret) <- length (unique(y))
  p <- 0
  K <- 0
  svindex <- problem <- NULL
  sigma <- 0.1
  degree <- offset <- scale <- 1

  ## map the S4 kernel class to the integer kernel code used by the C
  ## solvers; ktype == 4 means "custom kernel": the full kernel matrix K
  ## is computed in R and passed down instead.
  switch(is(kernel)[1],
         "rbfkernel" =
         {
           sigma <- kpar(kernel)$sigma
           ktype <- 2
         },
         "tanhkernel" =
         {
           sigma <- kpar(kernel)$scale
           offset <- kpar(kernel)$offset
           ktype <- 3
         },
         "polykernel" =
         {
           degree <- kpar(kernel)$degree
           sigma <- kpar(kernel)$scale
           offset <- kpar(kernel)$offset
           ktype <- 1
         },
         "vanillakernel" =
         {
           ktype <- 0
         },
         "laplacekernel" =
         {
           ktype <- 5
           sigma <- kpar(kernel)$sigma
         },
         "besselkernel" =
         {
           ktype <- 6
           sigma <- kpar(kernel)$sigma
           degree <- kpar(kernel)$order
           offset <- kpar(kernel)$degree
         },
         "anovakernel" =
         {
           ktype <- 7
           sigma <- kpar(kernel)$sigma
           degree <- kpar(kernel)$degree
         },
         "splinekernel" =
         {
           ktype <- 8
         },
         {
           ktype <- 4
         }
         )
  prior(ret) <- list(NULL)

  ## C classification: one-vs-one pairwise binary problems solved by SMO
  if(type(ret) == "C-svc"){
    indexes <- lapply(sort(unique(y)), function(kk) which(y == kk))
    for (i in 1:(nclass(ret)-1)) {
      jj <- i+1
      for(j in jj:nclass(ret)) {
        p <- p+1
        ## prepare the data
        li <- length(indexes[[i]])
        lj <- length(indexes[[j]])

        if(y[indexes[[i]][1]] < y[indexes[[j]]][1])
          {
            yd <- c(rep(-1,li),rep(1,lj))
            if(!is.null(class.weights)){
              weight <- class.weights[weightlabels[c(j,i)]]
              wl <- c(1,0)
              nweights <- 2
            }
          }
        else
          {
            yd <- c(rep(1,li),rep(-1,lj))
            if(!is.null(class.weights)){
              weight <- class.weights[weightlabels[c(i,j)]]
              wl <- c(0,1)
              ## NOTE(review): 'nweigths' is a typo for 'nweights' — in this
              ## branch the weight count is never updated; confirm and fix
              ## separately (same typo recurs in the nu-svc/C-bsvc branches).
              nweigths <- 2
            }
          }
        boolabel <- yd >= 0
        prior1 <- sum(boolabel)
        md <- length(yd)
        prior0 <- md - prior1
        prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0)

        if(ktype==4)
          K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])

        resv <- .Call("smo_optim",
                      as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])),
                      as.integer(li+lj),
                      as.integer(ncol(x)),
                      as.double(yd),
                      as.double(K),
                      as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0),
                      as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0),
                      as.integer(sparse),
                      as.double(matrix(rep(-1,m))), ##linear term
                      as.integer(ktype),
                      as.integer(0),
                      as.double(C),
                      as.double(nu),
                      as.double(epsilon),
                      as.double(sigma),
                      as.integer(degree),
                      as.double(offset),
                      as.integer(wl), ##weightlabel
                      as.double(weight),
                      as.integer(nweights),
                      as.double(cache),
                      as.double(tol),
                      as.integer(shrinking),
                      PACKAGE="kernlab")
        ## resv layout: alphas (li+lj), then b, then objective value
        reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix
        tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind]
        ## alpha
        svind <- tmpres > 0
        alpha(ret)[p] <- list(tmpres[svind])
        ## coefficients alpha*y
        coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][svind])
        ## store SV indexes from current problem for later use in predict
        alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind])
        ## store Support Vectors
        xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop=FALSE][svind, ,drop=FALSE])
        ## save the indexes from all the SV in a vector (use unique?)
        svindex <- c(svindex,alphaindex(ret)[[p]])
        ## store betas in a vector
        b(ret) <- c(b(ret), resv[li+lj+1])
        ## store objective function values in a vector
        obj(ret) <- c(obj(ret), resv[li+lj+2])
        ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?)
        problem[p] <- list(c(i,j))
        ##store C in return object
        param(ret)$C <- C
        ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2
      }
    }
  }

  ## nu classification: same one-vs-one scheme, nu-parameterized SMO
  if(type(ret) == "nu-svc"){
    indexes <- lapply(sort(unique(y)), function(kk) which(y == kk))
    for (i in 1:(nclass(ret)-1)) {
      jj <- i+1
      for(j in jj:nclass(ret)) {
        p <- p+1
        ##prepare data
        li <- length(indexes[[i]])
        lj <- length(indexes[[j]])

        if(y[indexes[[i]][1]] < y[indexes[[j]]][1])
          {
            yd <- c(rep(-1,li),rep(1,lj))
            if(!is.null(class.weights)){
              weight <- class.weights[weightlabels[c(j,i)]]
              wl <- c(1,0)
              nweights <- 2
            }
          }
        else
          {
            yd <- c(rep(1,li),rep(-1,lj))
            if(!is.null(class.weights)){
              weight <- class.weights[weightlabels[c(i,j)]]
              wl <- c(0,1)
              nweigths <- 2
            }
          }
        boolabel <- yd >= 0
        prior1 <- sum(boolabel)
        md <- length(yd)
        prior0 <- md - prior1
        prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0)

        if(ktype==4)
          K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])

        resv <- .Call("smo_optim",
                      as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])),
                      as.integer(li+lj),
                      as.integer(ncol(x)),
                      as.double(yd),
                      as.double(K),
                      as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0),
                      as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0),
                      as.integer(sparse),
                      as.double(matrix(rep(-1,m))), #linear term
                      as.integer(ktype),
                      as.integer(1),
                      as.double(C),
                      as.double(nu),
                      as.double(epsilon),
                      as.double(sigma),
                      as.integer(degree),
                      as.double(offset),
                      as.integer(wl), #weightlabl.
                      as.double(weight),
                      as.integer(nweights),
                      as.double(cache),
                      as.double(tol),
                      as.integer(shrinking),
                      PACKAGE="kernlab")
        reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix
        tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind]
        ## in nu-svc alphas already carry the sign, so alpha == coef
        svind <- tmpres != 0
        alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[svind])
        ##store SV indexes from current problem for later use in predict
        alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind])
        ## store Support Vectors
        xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop=FALSE][svind,,drop=FALSE])
        ##save the indexes from all the SV in a vector (use unique!)
        svindex <- c(svindex,alphaindex(ret)[[p]])
        ## store betas in a vector
        b(ret) <- c(b(ret), resv[li+lj+1])
        ## store objective function values in a vector
        obj(ret) <- c(obj(ret), resv[li+lj+2])
        ## used to reconstruct indexes for the patterns matrix x from "indexes"
        problem[p] <- list(c(i,j))
        param(ret)$nu <- nu
        ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2
      }
    }
  }

  ## Bound constraint C classification: one-vs-one, TRON solver
  if(type(ret) == "C-bsvc"){
    if(!is.null(class.weights))
      weightedC <- class.weights[weightlabels] * rep(C,nclass(ret))
    else
      weightedC <- rep(C,nclass(ret))
    indexes <- lapply(sort(unique(y)), function(kk) which(y == kk))
    for (i in 1:(nclass(ret)-1)) {
      jj <- i+1
      for(j in jj:nclass(ret)) {
        p <- p+1
        ##prepare data
        li <- length(indexes[[i]])
        lj <- length(indexes[[j]])

        if(y[indexes[[i]][1]] < y[indexes[[j]]][1])
          {
            yd <- c(rep(-1,li),rep(1,lj))
            if(!is.null(class.weights)){
              weight <- class.weights[weightlabels[c(j,i)]]
              wl <- c(1,0)
              nweights <- 2
            }
          }
        else
          {
            yd <- c(rep(1,li),rep(-1,lj))
            if(!is.null(class.weights)){
              weight <- class.weights[weightlabels[c(i,j)]]
              wl <- c(0,1)
              nweigths <- 2
            }
          }
        boolabel <- yd >= 0
        prior1 <- sum(boolabel)
        md <- length(yd)
        prior0 <- md - prior1
        prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0)

        if(ktype==4)
          K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])

        resv <- .Call("tron_optim",
                      as.double(t(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE])),
                      as.integer(li+lj),
                      as.integer(ncol(x)),
                      as.double(yd),
                      as.double(K),
                      as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ia else 0),
                      as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE]@ja else 0),
                      as.integer(sparse),
                      as.integer(2),
                      as.double(0), ##countc
                      as.integer(ktype),
                      as.integer(5),
                      as.double(C),
                      as.double(epsilon),
                      as.double(sigma),
                      as.integer(degree),
                      as.double(offset),
                      as.double(1), ## cost value of alpha seeding
                      as.double(2), ## step value of alpha seeding
                      as.integer(wl), ##weightlabel
                      as.double(weight),
                      as.integer(nweights),
                      as.double(weightedC),
                      as.double(cache),
                      as.double(tol),
                      as.integer(10), ##qpsize
                      as.integer(shrinking),
                      PACKAGE="kernlab")
        reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix
        svind <- resv[-(li+lj+1)][reind] > 0
        alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][svind])
        ## nonzero alpha*y
        coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][svind])
        ## store SV indexes from current problem for later use in predict
        alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind])
        ## store Support Vectors
        xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][reind,,drop = FALSE][svind,,drop = FALSE])
        ## save the indexes from all the SV in a vector (use unique?)
        svindex <- c(svindex,alphaindex(ret)[[p]])
        ## store betas in a vector
        b(ret) <- - sapply(coef(ret),sum)
        ## store obj. values in vector
        obj(ret) <- c(obj(ret), resv[(li+lj+1)])
        ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?)
        problem[p] <- list(c(i,j))
        ##store C in return object
        param(ret)$C <- C
        ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2
      }
    }
  }

  ## SPOC multiclass classification (Crammer & Singer), single joint problem
  if(type(ret) =="spoc-svc")
    {
      if(!is.null(class.weights))
        weightedC <- class.weights[weightlabels] * rep(C,nclass(ret))
      else
        weightedC <- rep(C,nclass(ret))
      yd <- sort(y,method="quick", index.return = TRUE)
      xd <- matrix(x[yd$ix,],nrow=dim(x)[1])
      count <- 0

      if(ktype==4)
        K <- kernelMatrix(kernel,x)

      resv <- .Call("tron_optim",
                    as.double(t(xd)),
                    as.integer(nrow(xd)),
                    as.integer(ncol(xd)),
                    as.double(rep(yd$x-1,2)),
                    as.double(K),
                    as.integer(if (sparse) xd@ia else 0),
                    as.integer(if (sparse) xd@ja else 0),
                    as.integer(sparse),
                    as.integer(nclass(ret)),
                    as.integer(count),
                    as.integer(ktype),
                    as.integer(7),
                    as.double(C),
                    as.double(epsilon),
                    as.double(sigma),
                    as.integer(degree),
                    as.double(offset),
                    as.double(C),
                    as.double(2), #Cstep
                    as.integer(0), #weightlabel
                    as.double(0),
                    as.integer(0),
                    as.double(weightedC),
                    as.double(cache),
                    as.double(tol),
                    as.integer(10), #qpsize
                    as.integer(shrinking),
                    PACKAGE="kernlab")
      reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix
      alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xd) + 1)],nclass(ret)))[reind,,drop=FALSE]
      coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0])
      names(coef(ret)) <- lev(ret)
      alphaindex(ret) <- lapply(sort(unique(y)), function(x) which(alpha(ret)[,x]!=0))
      xmatrix(ret) <- x
      obj(ret) <- resv[(nclass(ret)*nrow(xd) + 1)]
      names(alphaindex(ret)) <- lev(ret)
      svindex <- which(rowSums(alpha(ret)!=0)!=0)
      b(ret) <- 0
      param(ret)$C <- C
    }

  ## KBB multiclass classification (Weston & Watkins style), single problem
  if(type(ret) =="kbb-svc")
    {
      ## NOTE(review): here weightedC multiplies by 'weightlabels' (the level
      ## index), unlike the other branches which use class.weights[weightlabels];
      ## looks like a bug — confirm against the kernelMatrix method.
      if(!is.null(class.weights))
        weightedC <- weightlabels * rep(C,nclass(ret))
      else
        weightedC <- rep(C,nclass(ret))
      yd <- sort(y,method="quick", index.return = TRUE)
      x <- x[yd$ix,,drop=FALSE]
      count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c]))
      if(ktype==4)
        K <- kernelMatrix(kernel,x)
      resv <- .Call("tron_optim",
                    as.double(t(x)),
                    as.integer(nrow(x)),
                    as.integer(ncol(x)),
                    as.double(yd$x-1),
                    as.double(K),
                    as.integer(if (sparse) x@ia else 0),
                    as.integer(if (sparse) x@ja else 0),
                    as.integer(sparse),
                    as.integer(nclass(ret)),
                    as.integer(count),
                    as.integer(ktype),
                    as.integer(8),
                    as.double(C),
                    as.double(epsilon),
                    as.double(sigma),
                    as.integer(degree),
                    as.double(offset),
                    as.double(C), #Cbegin
                    as.double(2), #Cstep
                    as.integer(0), #weightlabl.
                    as.double(0),
                    as.integer(0),
                    as.double(weightedC),
                    as.double(cache),
                    as.double(tol),
                    as.integer(10), #qpsize
                    as.integer(shrinking),
                    PACKAGE="kernlab")
      reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix
      alpha(ret) <- matrix(resv[-(nrow(x)*(nclass(ret)-1)+1)],nrow(x))[reind,,drop=FALSE]
      xmatrix(ret) <- x<- x[reind,,drop=FALSE]
      coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0])
      alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0)))
      svindex <- which(rowSums(alpha(ret)!=0)!=0)
      b(ret) <- - sapply(coef(ret),sum)
      obj(ret) <- resv[(nrow(x)*(nclass(ret)-1)+1)]
      param(ret)$C <- C
    }

  ## Novelty detection (one-class SVM)
  if(type(ret) =="one-svc")
    {
      if(ktype==4)
        K <- kernelMatrix(kernel,x)
      resv <- .Call("smo_optim",
                    as.double(t(x)),
                    as.integer(nrow(x)),
                    as.integer(ncol(x)),
                    as.double(matrix(rep(1,m))),
                    as.double(K),
                    as.integer(if (sparse) x@ia else 0),
                    as.integer(if (sparse) x@ja else 0),
                    as.integer(sparse),
                    as.double(matrix(rep(-1,m))),
                    as.integer(ktype),
                    as.integer(2),
                    as.double(C),
                    as.double(nu),
                    as.double(epsilon),
                    as.double(sigma),
                    as.integer(degree),
                    as.double(offset),
                    as.integer(0), #weightlabl.
                    as.double(0),
                    as.integer(0),
                    as.double(cache),
                    as.double(tol),
                    as.integer(shrinking),
                    PACKAGE="kernlab")
      tmpres <- resv[c(-(m+1),-(m+2))]
      alpha(ret) <- coef(ret) <- tmpres[tmpres != 0]
      svindex <- alphaindex(ret) <- which(tmpres != 0)
      xmatrix(ret) <- x[svindex,,drop=FALSE]
      b(ret) <- resv[(m+1)]
      obj(ret) <- resv[(m+2)]
      param(ret)$nu <- nu
    }

  ## epsilon regression
  if(type(ret) =="eps-svr")
    {
      if(ktype==4)
        K <- kernelMatrix(kernel,x)
      resv <- .Call("smo_optim",
                    as.double(t(x)),
                    as.integer(nrow(x)),
                    as.integer(ncol(x)),
                    as.double(y),
                    as.double(K),
                    as.integer(if (sparse) x@ia else 0),
                    as.integer(if (sparse) x@ja else 0),
                    as.integer(sparse),
                    as.double(matrix(rep(-1,m))),
                    as.integer(ktype),
                    as.integer(3),
                    as.double(C),
                    as.double(nu),
                    as.double(epsilon),
                    as.double(sigma),
                    as.integer(degree),
                    as.double(offset),
                    as.integer(0), #weightlabl.
                    as.double(0),
                    as.integer(0),
                    as.double(cache),
                    as.double(tol),
                    as.integer(shrinking),
                    PACKAGE="kernlab")
      tmpres <- resv[c(-(m+1),-(m+2))]
      alpha(ret) <- coef(ret) <- tmpres[tmpres != 0]
      svindex <- alphaindex(ret) <- which(tmpres != 0)
      xmatrix(ret) <- x[svindex, ,drop=FALSE]
      b(ret) <- resv[(m+1)]
      obj(ret) <- resv[(m+2)]
      param(ret)$epsilon <- epsilon
      param(ret)$C <- C
    }

  ## nu regression
  if(type(ret) =="nu-svr")
    {
      if(ktype==4)
        K <- kernelMatrix(kernel,x)
      resv <- .Call("smo_optim",
                    as.double(t(x)),
                    as.integer(nrow(x)),
                    as.integer(ncol(x)),
                    as.double(y),
                    as.double(K),
                    as.integer(if (sparse) x@ia else 0),
                    as.integer(if (sparse) x@ja else 0),
                    as.integer(sparse),
                    as.double(matrix(rep(-1,m))),
                    as.integer(ktype),
                    as.integer(4),
                    as.double(C),
                    as.double(nu),
                    as.double(epsilon),
                    as.double(sigma),
                    as.integer(degree),
                    as.double(offset),
                    as.integer(0),
                    as.double(0),
                    as.integer(0),
                    as.double(cache),
                    as.double(tol),
                    as.integer(shrinking),
                    PACKAGE="kernlab")
      tmpres <- resv[c(-(m+1),-(m+2))]
      alpha(ret) <- coef(ret) <- tmpres[tmpres!=0]
      svindex <- alphaindex(ret) <- which(tmpres != 0)
      xmatrix(ret) <- x[svindex,,drop=FALSE]
      b(ret) <- resv[(m+1)]
      obj(ret) <- resv[(m+2)]
      param(ret)$epsilon <- epsilon
      param(ret)$nu <- nu
    }

  ## bound constraint eps regression
  if(type(ret) =="eps-bsvr")
    {
      if(ktype==4)
        K <- kernelMatrix(kernel,x)
      resv <- .Call("tron_optim",
                    as.double(t(x)),
                    as.integer(nrow(x)),
                    as.integer(ncol(x)),
                    as.double(y),
                    as.double(K),
                    as.integer(if (sparse) x@ia else 0),
                    as.integer(if (sparse) x@ja else 0),
                    as.integer(sparse),
                    as.integer(2),
                    as.integer(0),
                    as.integer(ktype),
                    as.integer(6),
                    as.double(C),
                    as.double(epsilon),
                    as.double(sigma),
                    as.integer(degree),
                    as.double(offset),
                    as.double(1), #Cbegin
                    as.double(2), #Cstep
                    as.integer(0), #weightlabl.
                    as.double(0),
                    as.integer(0),
                    as.double(0),
                    as.double(cache),
                    as.double(tol),
                    as.integer(10), #qpsize
                    as.integer(shrinking),
                    PACKAGE="kernlab")
      tmpres <- resv[-(m + 1)]
      alpha(ret) <- coef(ret) <- tmpres[tmpres!=0]
      svindex <- alphaindex(ret) <- which(tmpres != 0)
      xmatrix(ret) <- x[svindex,,drop=FALSE]
      b(ret) <- -sum(alpha(ret))
      obj(ret) <- resv[(m + 1)]
      param(ret)$epsilon <- epsilon
      param(ret)$C <- C
    }

  ## common post-processing: record call, kernel, SV bookkeeping
  kcall(ret) <- match.call()
  kernelf(ret) <- kernel
  ymatrix(ret) <- y
  SVindex(ret) <- sort(unique(svindex),method="quick")
  nSV(ret)  <- length(unique(svindex))
  if(nSV(ret)==0)
    stop("No Support Vectors found. You may want to change your parameters")

  fitted(ret)  <- if (fit)
    predict(ret, x) else NULL

  if(any(scaled))
    scaling(ret) <- list(scaled = scaled, x.scale = x.scale, y.scale = y.scale)

  ## training error (classification: disagreement rate; regression: MSE)
  if (fit){
    if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc")
      error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret))))
    if(type(ret)=="one-svc")
      error(ret) <- sum(!fitted(ret))/m
    if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr"){
      if (!is.null(scaling(ret)$y.scale)){
        scal <- scaling(ret)$y.scale$"scaled:scale"
        fitted(ret) <- fitted(ret) # / scaling(ret)$y.scale$"scaled:scale" + scaling(ret)$y.scale$"scaled:center"
      }
      else
        scal <- 1
      error(ret) <- drop(crossprod(fitted(ret) - y)/m)
    }
  }

  ## k-fold cross-validation (cross > 1): refit on each training split and
  ## accumulate the average held-out error into cross(ret)
  cross(ret) <- -1
  if(cross == 1)
    cat("\n","cross should be >1 no cross-validation done!","\n","\n")
  else if (cross > 1)
    {
      cerror <- 0
      suppressWarnings(vgr<-split(sample(1:m,m),1:cross))
      for(i in 1:cross)
        {
          cind <-  unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length)))))
          if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc")
            {
              if(is.null(class.weights))
                cret <- ksvm(x[cind,],y[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE ,cache = cache)
              else
                cret <- ksvm(x[cind,],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache)
              cres <- predict(cret, x[vgr[[i]],,drop=FALSE])
              cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror
            }
          if(type(ret)=="one-svc")
            {
              cret <- ksvm(x[cind,],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol,scaled=FALSE, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE)
              cres <- predict(cret, x[vgr[[i]],, drop=FALSE])
              cerror <- (1 - sum(cres)/length(cres))/cross + cerror
            }
          if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr")
            {
              cret <- ksvm(x[cind,],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol,scaled=FALSE, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE)
              cres <- predict(cret, x[vgr[[i]],,drop=FALSE])
              if (!is.null(scaling(ret)$y.scale))
                scal <- scaling(ret)$y.scale$"scaled:scale"
              else
                scal <- 1
              cerror <- drop((scal^2)*crossprod(cres - y[vgr[[i]]])/m) + cerror
            }
        }
      cross(ret) <- cerror
    }

  ## optional probability model: 3-fold Platt scaling per binary problem
  ## (classification), or a Laplace spread estimate (regression)
  prob.model(ret) <- list(NULL)

  if(prob.model)
    {
      if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc")
        {
          p <- 0
          for (i in 1:(nclass(ret)-1)) {
            jj <- i+1
            for(j in jj:nclass(ret)) {
              p <- p+1
              ##prepare data
              li <- length(indexes[[i]])
              lj <- length(indexes[[j]])

              if(y[indexes[[i]][1]] < y[indexes[[j]]][1])
                {
                  yd <- c(rep(-1,li),rep(1,lj))
                  if(!is.null(class.weights)){
                    ## NOTE(review): here 'weight' is set to the level indexes
                    ## (weightlabels[...]), not class.weights[weightlabels[...]]
                    ## as in the training loops — confirm, looks inconsistent.
                    weight <- weightlabels[c(j,i)]
                    wl <- c(1,0)
                    nweights <- 2
                  }
                }
              else
                {
                  yd <- c(rep(1,li),rep(-1,lj))
                  if(!is.null(class.weights)){
                    weight <- weightlabels[c(i,j)]
                    wl <- c(0,1)
                    nweigths <- 2
                  }
                }
              m <- li+lj
              suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3))

              pres <- yres <- NULL
              for(k in 1:3)
                {
                  cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length)))))

                  if(is.null(class.weights))
                    cret <- ksvm(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][cind,],yd[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE ,cache = cache, prob.model = FALSE)
                  else
                    cret <- ksvm(x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][cind,],as.factor(lev(ret)[y[c(indexes[[i]],indexes[[j]])][cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, scaled=FALSE, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache, prob.model = FALSE)
                  yres <- c(yres, yd[vgr[[k]]])
                  pres <- rbind(pres, predict(cret, x[c(indexes[[i]],indexes[[j]]), ,drop=FALSE][vgr[[k]],],type="decision"))
                }
              prob.model(ret)[[p]] <- .probPlatt(pres,yres)
            }
          }
        }
      if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){
        suppressWarnings(vgr<-split(sample(1:m,m),1:3))
        pres <-  NULL
        for(i in 1:3)
          {
            cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length)))))

            cret <- ksvm(x[cind,],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol,scaled=FALSE, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE)
            cres <- predict(cret, x[vgr[[i]],])
            if (!is.null(scaling(ret)$y.scale))
              cres <- cres * scaling(ret)$y.scale$"scaled:scale" + scaling(ret)$y.scale$"scaled:center"
            pres <- rbind(pres, cres)
          }
        pres[abs(pres) > (5*sd(pres))] <- 0
        prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1])
      }
    }

  return(ret)
})



## kernelmatrix interface: x is a precomputed kernel matrix (class
## "kernelMatrix"); no kernel function or scaling is involved (ktype 4).
setMethod("ksvm",signature(x="kernelMatrix"),
function (x, y = NULL, type = NULL, C = 1, nu = 0.2, epsilon = 0.1,
          prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE,
          cache = 40, tol = 0.001, shrinking = TRUE, ...)
{
  sparse <- FALSE
  ## subsetting and na-handling for matrices
  ret <- new("ksvm")
  if (is.null(type)) type(ret) <-
    if (is.null(y)) "one-svc"
    else if (is.factor(y)) "C-svc"
    else "eps-svr"
  if(!is.null(type))
  type(ret) <- match.arg(type,c("C-svc",
                                "nu-svc",
                                "kbb-svc",
                                "spoc-svc",
                                "C-bsvc",
                                "one-svc",
                                "eps-svr",
                                "eps-bsvr",
                                "nu-svr"))
  ncols <- ncol(x)
  m <- nrows <- nrow(x)

  if (!is(y,"vector") && !is.factor (y) & !is(y,"matrix") & !(type(ret)=="one-svc")) stop("y must be a vector or a factor.")
  if(!(type(ret)=="one-svc"))
    if(is(y,"vector") | is(y,"factor")) ym <- length(y)
    else if(is(y,"matrix")) ym <- dim(y)[1]
    else stop("y must be a matrix or a vector")

  if ((type(ret) != "one-svc") && ym != m) stop("x and y don't match.")

  if(nu > 1|| nu <0) stop("nu must be between 0 an 1.")

  weightlabels <- NULL
  nweights <- 0
  weight <- 0
  wl <- 0
  ## in case of classification: transform factors into integers
  if (type(ret) == "one-svc") # one class classification --> set dummy
    y <- 1
  else
    if (is.factor(y)) {
      lev(ret) <- levels (y)
      y <- as.integer (y)
      if
(!is.null(class.weights)) { if (is.null(names (class.weights))) stop ("Weights have to be specified along with their according level names !") weightlabels <- match (names(class.weights),lev(ret)) if (any(is.na(weightlabels))) stop ("At least one level name is missing or misspelled.") } } else { if ((type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if (type(ret) != "eps-svr" || type(ret) != "nu-svr"|| type(ret)!="eps-bsvr") lev(ret) <- sort(unique (y)) } ## initialize nclass(ret) <- length (unique(y)) p <- 0 svindex <- problem <- NULL sigma <- 0.1 degree <- offset <- scale <- 1 ktype <- 4 prior(ret) <- list(NULL) ## C classification if(type(ret) == "C-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(1,li+lj,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(as.vector(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE])), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), 
as.double(matrix(rep(-1,m))), ##linear term as.integer(ktype), as.integer(0), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] ## alpha svind <- tmpres > 0 alpha(ret)[p] <- list(tmpres[svind]) ## coefficients alpha*y coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][svind]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][svind]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[svind, svind,drop=FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C } } } ## nu classification if(type(ret) == "nu-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) ##xd <- matrix(0,(li+lj),(li+lj)) ##xdi <- 1:(li+lj) <= li ##xd[xdi,rep(TRUE,li+lj)] <- x[indexes[[i]],c(indexes[[i]],indexes[[j]])] ##xd[xdi == FALSE,rep(TRUE,li+lj)] <- x[indexes[[j]],c(indexes[[i]],indexes[[j]])] if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(1,li+lj,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), #linear term as.integer(ktype), as.integer(1), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), #weightlabl. 
as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[tmpres != 0]) ##store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres != 0]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[tmpres != 0,tmpres != 0,drop=FALSE]) ##save the indexes from all the SV in a vector (use unique!) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) ## store objective function values in vector obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" problem[p] <- list(c(i,j)) param(ret)$nu <- nu ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## Bound constraint C classification if(type(ret) == "C-bsvc"){ if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) xdd <- matrix(rnorm(li+lj),li+lj,1) resv <- .Call("tron_optim", 
as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ia else 0), as.integer(if (sparse) x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE]@ja else 0), as.integer(sparse), as.integer(2), as.double(0), ##countc as.integer(ktype), as.integer(5), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), ## cost value of alpha seeding as.double(2), ## step value of alpha seeding as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), ##qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][resv[-(li+lj+1)][reind] > 0]) ## nonzero alpha*y coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][resv[-(li+lj+1)][reind] > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][resv[-(li+lj+1)][reind] > 0]) ## store Support Vectors ## xmatrix(ret)[p] <- list(xd[resv > 0 ,resv > 0,drop = FALSE]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- - sapply(coef(ret),sum) ## store objective function values vector obj(ret) <- c(obj(ret), resv[(li+lj+1)]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) 
problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C } } } ## SPOC multiclass classification if(type(ret) =="spoc-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- matrix(x[yd$ix,yd$ix],nrow=dim(x)[1]) count <- 0 xdd <- matrix(1,m,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(rep(yd$x-1,2)), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(7), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), as.double(2), #Cstep as.integer(0), #weightlabel as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xdd)+1)],nclass(ret)))[reind,,drop=FALSE] coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) names(coef(ret)) <- lev(ret) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which(alpha(ret)[,x]!=0)) ## xmatrix(ret) <- x names(alphaindex(ret)) <- lev(ret) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- 0 obj(ret) <- resv[(nclass(ret)*nrow(xdd)+1)] param(ret)$C <- C } ## KBB multiclass classification if(type(ret) =="kbb-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- matrix(x[yd$ix,yd$ix],nrow=dim(x)[1]) count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c])) xdd <- matrix(1,m,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd$x-1), 
as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(8), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- matrix(resv[-(nrow(x)*(nclass(ret)-1) + 1)],nrow(x))[reind,,drop=FALSE] coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0))) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- - sapply(coef(ret),sum) obj(ret) <- resv[(nrow(x)*(nclass(ret)-1) + 1)] param(ret)$C <- C } ## Novelty detection if(type(ret) =="one-svc") { xdd <- matrix(1,m,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(matrix(rep(1,m))), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(2), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$nu <- nu } ## epsilon regression if(type(ret) =="eps-svr") { xdd <- matrix(1,m,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(3), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex ,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$C <- C } ## nu regression if(type(ret) =="nu-svr") { xdd <- matrix(1,m,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(4), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,svindex,drop=FALSE] b(ret) <- resv[(m+1)] obj(ret) <- 
resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$nu <- nu } ## bound constraint eps regression if(type(ret) =="eps-bsvr") { xdd <- matrix(1,m,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(x), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.integer(0), as.integer(ktype), as.integer(6), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(0), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[-(m+1)] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) ## xmatrix(ret) <- x[svindex,,drop=FALSE] b(ret) <- -sum(alpha(ret)) obj(ret) <- resv[(m+1)] param(ret)$epsilon <- epsilon param(ret)$C <- C } kcall(ret) <- match.call() kernelf(ret) <- " Kernel matrix used as input." ymatrix(ret) <- y SVindex(ret) <- unique(sort(svindex,method="quick")) nSV(ret) <- length(unique(svindex)) if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) predict(ret, as.kernelMatrix(x[,SVindex(ret),drop = FALSE])) else NULL if (fit){ if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="one-svc") error(ret) <- sum(!fitted(ret))/m if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr <- split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(x[cind,cind]),y[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(as.kernelMatrix(x[cind,cind]), as.factor(lev(ret)[y[cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="one-svc") { cret <- ksvm(as.kernelMatrix(x[cind,cind]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - sum(cres)/length(cres))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(as.kernelMatrix(x[cind,cind]),y[cind],type=type(ret), C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, 
as.kernelMatrix(x[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind]),yd[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache, prob.model=FALSE) else cret <- ksvm(as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind]), as.factor(lev(ret)[y[c(indexes[[i]],indexes[[j]])][cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, as.kernelMatrix(x[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][vgr[[k]], cind,drop = FALSE][,SVindex(cret),drop = FALSE]),type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) } } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) pres <- NULL for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- 
## (continues the `cret <-` assignment begun on the previous line:
## refit on two thirds of the data for the 3-fold probability-model loop)
ksvm(as.kernelMatrix(x[cind,cind]),y[cind],type=type(ret), C=C, nu=nu, epsilon=epsilon, tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE)
          cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind, drop = FALSE][,SVindex(cret), drop = FALSE]))
          ## decision values on the held-out third, pooled over the 3 folds
          pres <- rbind(pres,predict(cret, as.kernelMatrix(x[vgr[[i]],cind , drop = FALSE][,SVindex(cret) ,drop = FALSE]),type="decision"))
        }
        ## zero out decision values more than 5 SDs from zero before computing
        ## the scale estimate below (outlier clipping)
        pres[abs(pres) > (5*sd(pres))] <- 0
        ## store the mean absolute cross-validated decision value as the
        ## single-element probability model for regression types
        ## NOTE(review): presumably consumed by predict() as a Laplace-type
        ## error scale -- confirm against the predict method
        prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1])
      }
    }
    return(ret)
  })

## .classAgreement: proportion of agreement (classification accuracy) in a
## contingency table of counts, typically table(true, predicted).
##
## tab : a table/matrix of counts.
##
## If the table carries dimnames, only class labels present in BOTH the row
## and the column names contribute, and the diagonal is taken over that
## matched label set; otherwise the leading min(nrow, ncol) diagonal is used
## positionally.  Returns a scalar in [0, 1] (agreement count / total count).
.classAgreement <- function (tab) {
  n <- sum(tab)
  if (!is.null(dimnames(tab))) {
    ## match classes by name so row/column order does not matter
    lev <- intersect(colnames(tab), rownames(tab))
    p0 <- sum(diag(tab[lev, lev])) / n
  } else {
    ## no names available: fall back to positional matching
    m <- min(dim(tab))
    p0 <- sum(diag(tab[1:m, 1:m])) / n
  }
  return(p0)
}

## List Interface
## (signature continues on the following line)
setMethod("ksvm",signature(x="list"),
function (x, y = NULL, type = NULL, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ...
,na.action = na.omit) { ret <- new("ksvm") if (is.null(y)) x <- na.action(x) n.action(ret) <- na.action sparse <- FALSE if (is.null(type)) type(ret) <- if (is.null(y)) "one-svc" else if (is.factor(y)) "C-svc" else "eps-svr" if(!is.null(type)) type(ret) <- match.arg(type,c("C-svc", "nu-svc", "kbb-svc", "spoc-svc", "C-bsvc", "one-svc", "eps-svr", "eps-bsvr", "nu-svr")) m <- length(x) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","stringdot")) if(is.character(kpar)) if(kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot" || kernel == "rbfdot" || kernel == "laplacedot" ) { stop("List interface supports only the stringdot kernel.") } } if(is(kernel,"kernel") & !is(kernel,"stringkernel")) stop("List interface supports only the stringdot kernel.") if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if (!is(y,"vector") && !is.factor(y) & !is(y,"matrix") & !(type(ret)=="one-svc")) stop("y must be a vector or a factor.") if(!(type(ret)=="one-svc")) if(is(y,"vector") | is(y,"factor")) ym <- length(y) else if(is(y,"matrix")) ym <- dim(y)[1] else stop("y must be a matrix or a vector") if ((type(ret) != "one-svc") && ym != m) stop("x and y don't match.") if(nu > 1|| nu <0) stop("nu must be between 0 an 1.") weightlabels <- NULL nweights <- 0 weight <- 0 wl <- 0 ## in case of classification: transform factors into integers if (type(ret) == "one-svc") # one class classification --> set dummy y <- 1 else if (is.factor(y)) { lev(ret) <- levels (y) y <- as.integer (y) if (!is.null(class.weights)) { if (is.null(names (class.weights))) stop ("Weights have to be specified along with their according level names !") weightlabels <- match 
(names(class.weights),lev(ret)) if (any(is.na(weightlabels))) stop ("At least one level name is missing or misspelled.") } } else { if ((type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if (type(ret) != "eps-svr" || type(ret) != "nu-svr"|| type(ret)!="eps-bsvr") lev(ret) <- sort(unique (y)) } ## initialize if (type(ret) =="C-svc" || type(ret) == "nu-svc" ||type(ret) == "C-bsvc" || type(ret) == "spoc-svc" || type(ret) == "kbb-svc") nclass(ret) <- length (unique(y)) p <- 0 K <- 0 svindex <- problem <- NULL ktype <- 4 prior(ret) <- list(NULL) sigma <- 0.1 degree <- offset <- scale <- 1 ## C classification if(type(ret) == "C-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), ##linear term as.integer(ktype), as.integer(0), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), 
as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] ## alpha alpha(ret)[p] <- list(tmpres[tmpres > 0]) ## coefficients alpha*y coef(ret)[p] <- list(alpha(ret)[[p]]*yd[reind][tmpres > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres>0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][tmpres > 0]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) obj(ret) <- c(obj(ret),resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## nu classification if(type(ret) == "nu-svc"){ indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- 
.Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), #linear term as.integer(ktype), as.integer(1), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(wl), #weightlabl. as.double(weight), as.integer(nweights), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix tmpres <- resv[c(-(li+lj+1),-(li+lj+2))][reind] alpha(ret)[p] <- coef(ret)[p] <- list(tmpres[tmpres != 0]) ##store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][tmpres!=0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][tmpres != 0]) ##save the indexes from all the SV in a vector (use unique!) 
svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- c(b(ret), resv[li+lj+1]) obj(ret) <- c(obj(ret), resv[li+lj+2]) ## used to reconstruct indexes for the patterns matrix x from "indexes" problem[p] <- list(c(i,j)) param(ret)$nu <- nu ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## Bound constraint C classification if(type(ret) == "C-bsvc"){ if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) indexes <- lapply(sort(unique(y)), function(kk) which(y == kk)) for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(j,i)]] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- class.weights[weightlabels[c(i,j)]] wl <- c(0,1) nweigths <- 2 } } boolabel <- yd >= 0 prior1 <- sum(boolabel) md <- length(yd) prior0 <- md - prior1 prior(ret)[[p]] <- list(prior1 = prior1, prior0 = prior0) K <- kernelMatrix(kernel,x[c(indexes[[i]],indexes[[j]])]) xdd <- matrix(1,li+lj,1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.double(0), ##countc as.integer(ktype), as.integer(5), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), ## cost value of alpha seeding as.double(2), ## step value of alpha seeding as.integer(wl), ##weightlabel as.double(weight), as.integer(nweights), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), ##qpsize 
as.integer(shrinking), PACKAGE="kernlab") reind <- sort(c(indexes[[i]],indexes[[j]]),method="quick",index.return=TRUE)$ix alpha(ret)[p] <- list(resv[-(li+lj+1)][reind][resv[-(li+lj+1)][reind] > 0]) ## nonzero alpha*y coef(ret)[p] <- list(alpha(ret)[[p]] * yd[reind][resv[-(li+lj+1)][reind] > 0]) ## store SV indexes from current problem for later use in predict alphaindex(ret)[p] <- list(c(indexes[[i]],indexes[[j]])[reind][resv[-(li+lj+1)][reind] > 0]) ## store Support Vectors xmatrix(ret)[p] <- list(x[c(indexes[[i]],indexes[[j]])][reind][resv[-(li+lj+1)][reind] > 0]) ## save the indexes from all the SV in a vector (use unique?) svindex <- c(svindex,alphaindex(ret)[[p]]) ## store betas in a vector b(ret) <- - sapply(coef(ret),sum) obj(ret) <- c(obj(ret),resv[(li+lj+1)]) ## used to reconstruct indexes for the patterns matrix x from "indexes" (really usefull ?) problem[p] <- list(c(i,j)) ##store C in return object param(ret)$C <- C ## margin(ret)[p] <- (min(kernelMult(kernel,xd[1:li,],,alpha(ret)[[p]][1:li])) - max(kernelMult(kernel,xd[li:(li+lj),],,alpha(ret)[[p]][li:(li+lj)])))/2 } } } ## SPOC multiclass classification if(type(ret) =="spoc-svc") { if(!is.null(class.weights)) weightedC <- class.weights[weightlabels] * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- x[yd$ix] count <- 0 K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(rep(yd$x-1,2)), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(7), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(C), as.double(2), #Cstep as.integer(0), #weightlabel as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize 
as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- t(matrix(resv[-(nclass(ret)*nrow(xdd) + 1)],nclass(ret)))[reind,,drop=FALSE] coef(ret) <- lapply(1:nclass(ret), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) names(coef(ret)) <- lev(ret) alphaindex(ret) <- lapply(1:nclass(ret), function(x) which(alpha(ret)[,x]!=0)) names(alphaindex(ret)) <- lev(ret) xmatrix(ret) <- x svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- 0 obj(ret) <- resv[(nclass(ret)*nrow(xdd) + 1)] param(ret)$C <- C } ## KBB multiclass classification if(type(ret) =="kbb-svc") { if(!is.null(class.weights)) weightedC <- weightlabels * rep(C,nclass(ret)) else weightedC <- rep(C,nclass(ret)) yd <- sort(y,method="quick", index.return = TRUE) x <- x[yd$ix] count <- sapply(unique(yd$x), function(c) length(yd$x[yd$x==c])) K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(yd$x-1), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(nclass(ret)), as.integer(count), as.integer(ktype), as.integer(8), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(weightedC), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") reind <- sort(yd$ix,method="quick",index.return=TRUE)$ix alpha(ret) <- matrix(resv[-((nclass(ret)-1)*length(x)+1)],length(x))[reind,,drop=FALSE] xmatrix(ret) <- x<- x[reind] coef(ret) <- lapply(1:(nclass(ret)-1), function(x) alpha(ret)[,x][alpha(ret)[,x]!=0]) alphaindex(ret) <- lapply(sort(unique(y)), function(x) which((y == x) & (rowSums(alpha(ret))!=0))) svindex <- which(rowSums(alpha(ret)!=0)!=0) b(ret) <- - sapply(coef(ret),sum) obj(ret) <- resv[((nclass(ret)-1)*length(x)+1)] param(ret)$C <- C } ## Novelty detection if(type(ret) =="one-svc") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(matrix(rep(1,m))), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(2), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. 
as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres !=0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$nu <- nu } ## epsilon regression if(type(ret) =="eps-svr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(3), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres != 0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$C <- C } ## nu regression if(type(ret) =="nu-svr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("smo_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.double(matrix(rep(-1,m))), as.integer(ktype), as.integer(4), as.double(C), as.double(nu), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.integer(0), as.double(0), as.integer(0), as.double(cache), as.double(tol), as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[c(-(m+1),-(m+2))] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- resv[(m+1)] obj(ret) <- 
resv[(m+2)] param(ret)$epsilon <- epsilon param(ret)$nu <- nu } ## bound constraint eps regression if(type(ret) =="eps-bsvr") { K <- kernelMatrix(kernel,x) xdd <- matrix(1,length(x),1) resv <- .Call("tron_optim", as.double(t(xdd)), as.integer(nrow(xdd)), as.integer(ncol(xdd)), as.double(y), as.double(K), as.integer(if (sparse) x@ia else 0), as.integer(if (sparse) x@ja else 0), as.integer(sparse), as.integer(2), as.integer(0), as.integer(ktype), as.integer(6), as.double(C), as.double(epsilon), as.double(sigma), as.integer(degree), as.double(offset), as.double(1), #Cbegin as.double(2), #Cstep as.integer(0), #weightlabl. as.double(0), as.integer(0), as.double(0), as.double(cache), as.double(tol), as.integer(10), #qpsize as.integer(shrinking), PACKAGE="kernlab") tmpres <- resv[-(m+1)] alpha(ret) <- coef(ret) <- tmpres[tmpres!=0] svindex <- alphaindex(ret) <- which(tmpres != 0) xmatrix(ret) <- x[svindex] b(ret) <- -sum(alpha(ret)) obj(ret) <- resv[(m+1)] param(ret)$epsilon <- epsilon param(ret)$C <- C } kcall(ret) <- match.call() kernelf(ret) <- kernel ymatrix(ret) <- y SVindex(ret) <- unique(svindex) nSV(ret) <- length(unique(svindex)) if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") nclass(ret) <- m if(type(ret)=="one-svc") nclass(ret) <- 1 if(nSV(ret)==0) stop("No Support Vectors found. 
You may want to change your parameters") fitted(ret) <- if (fit) { if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") & nclass(ret) > 2) predict(ret, x) else if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc"||type(ret)=="spoc-bsvc"||type(ret)=="kbb-bsvc")) predict(ret,as.kernelMatrix(K[reind,reind][,SVindex(ret), drop=FALSE])) else predict(ret,as.kernelMatrix(K[,SVindex(ret), drop=FALSE])) } else NULL if (fit){ if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="one-svc") error(ret) <- sum(!fitted(ret))/m if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(!((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") & nclass(ret) > 2)) { if((type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc"||type(ret)=="spoc-bsvc"||type(ret)=="kbb-bsvc")) K <- as.kernelMatrix(K[reind,reind]) if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr <- split(sample(1:dim(K)[1],dim(K)[1]),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(as.kernelMatrix(K[cind,cind]),as.factor(lev(ret)[y[cind]]),type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } 
if(type(ret)=="one-svc") { cret <- ksvm(as.kernelMatrix(K[cind,cind]), type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- (1 - sum(cres)/length(cres))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type=type(ret), C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind,drop = FALSE][,SVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) cret <- ksvm(as.kernelMatrix(as.kernelMatrix(K[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][cind,cind])), yd[cind], type = type(ret), C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, as.kernelMatrix(K[c(indexes[[i]],indexes[[j]]),c(indexes[[i]],indexes[[j]]),drop=FALSE][vgr[[k]], cind,drop = FALSE][,SVindex(cret),drop = FALSE]),type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) 
} } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) pres <- NULL for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- ksvm(as.kernelMatrix(K[cind,cind]),y[cind],type=type(ret), C=C, nu=nu, epsilon=epsilon, tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, as.kernelMatrix(K[vgr[[i]], cind, drop = FALSE][,SVindex(cret), drop = FALSE])) pres <- rbind(pres,predict(cret, as.kernelMatrix(K[vgr[[i]],cind , drop = FALSE][,SVindex(cret) ,drop = FALSE]),type="decision")) } pres[abs(pres) > (5*sd(pres))] <- 0 prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1]) } } } else{ if(cross == 1) cat("\n","cross should be >1 no cross-validation done!","\n","\n") else if (cross > 1) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="spoc-svc"||type(ret)=="kbb-svc"||type(ret)=="C-bsvc") { if(is.null(class.weights)) cret <- ksvm(x[cind],y[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache) else cret <- ksvm(x[cind],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache) cres <- predict(cret, x[vgr[[i]]]) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="eps-svr"||type(ret)=="nu-svr"||type(ret)=="eps-bsvr") { cret <- ksvm(x[cind],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]]]) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m)/cross + cerror } } cross(ret) <- cerror } prob.model(ret) <- list(NULL) if(prob.model) { 
if(type(ret)=="C-svc"||type(ret)=="nu-svc"||type(ret)=="C-bsvc") { p <- 0 for (i in 1:(nclass(ret)-1)) { jj <- i+1 for(j in jj:nclass(ret)) { p <- p+1 ##prepare data li <- length(indexes[[i]]) lj <- length(indexes[[j]]) if(y[indexes[[i]][1]] < y[indexes[[j]]][1]) { yd <- c(rep(-1,li),rep(1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(j,i)] wl <- c(1,0) nweights <- 2 } } else { yd <- c(rep(1,li),rep(-1,lj)) if(!is.null(class.weights)){ weight <- weightlabels[c(i,j)] wl <- c(0,1) nweigths <- 2 } } m <- li+lj suppressWarnings(vgr <- split(c(sample(1:li,li),sample((li+1):(li+lj),lj)),1:3)) pres <- yres <- NULL for(k in 1:3) { cind <- unsplit(vgr[-k],factor(rep((1:3)[-k],unlist(lapply(vgr[-k],length))))) if(is.null(class.weights)) cret <- ksvm(x[c(indexes[[i]], indexes[[j]])][cind],yd[cind],type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE ,cache = cache, prob.model=FALSE) else cret <- ksvm(x[c(indexes[[i]], indexes[[j]])][cind],as.factor(lev(ret)[y[cind]]),type = type(ret),kernel=kernel,kpar = NULL, C=C, nu=nu, tol=tol, cross = 0, fit = FALSE, class.weights = class.weights,cache = cache, prob.model=FALSE) yres <- c(yres,yd[vgr[[k]]]) pres <- rbind(pres,predict(cret, x[c(indexes[[i]], indexes[[j]])][vgr[[k]]],type="decision")) } prob.model(ret)[[p]] <- .probPlatt(pres,yres) } } } if(type(ret) == "eps-svr"||type(ret) == "nu-svr"||type(ret)=="eps-bsvr"){ suppressWarnings(vgr<-split(sample(1:m,m),1:3)) for(i in 1:3) { cind <- unsplit(vgr[-i],factor(rep((1:3)[-i],unlist(lapply(vgr[-i],length))))) cret <- ksvm(x[cind],y[cind],type=type(ret),kernel=kernel,kpar = NULL,C=C,nu=nu,epsilon=epsilon,tol=tol, cross = 0, fit = FALSE, cache = cache, prob.model = FALSE) cres <- predict(cret, x[vgr[[i]]]) pres <- rbind(pres,predict(cret, x[vgr[[i]]],type="decision")) } pres[abs(pres) > (5*sd(pres))] <- 0 prob.model(ret) <- list(sum(abs(pres))/dim(pres)[1]) } } } return(ret) }) 
##**************************************************************#
## predict for matrix, data.frame input

## Predict method for fitted "ksvm" models.
##
## object  : a fitted ksvm model
## newdata : matrix / data.frame / list (string kernels) / kernelMatrix of
##           test data
## type    : "response"      class labels (classification) or fitted values
##                           (regression),
##           "probabilities" class probability matrix (requires that the
##                           model was trained with prob.model = TRUE),
##           "votes"         pairwise voting matrix,
##           "decision"      raw decision values
## coupler : pairwise probability coupling method passed on to couple()
setMethod("predict", signature(object = "ksvm"),
function (object, newdata, type = "response", coupler = "minpair")
{
  type <- match.arg(type,c("response","probabilities","votes","decision"))
  ## no newdata given: return the stored fitted values if available
  if (missing(newdata) && type=="response" & !is.null(fitted(object)))
    return(fitted(object))
  else if(missing(newdata))
    stop("Missing data !")

  if(!is(newdata,"list")){
    if (!is.null(terms(object)) & !is(newdata,"kernelMatrix"))
      {
        ## model was fitted through the formula interface:
        ## rebuild the model matrix from newdata
        if(!is.matrix(newdata))
          newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = n.action(object))
      }
    else
      newdata  <- if (is.vector(newdata)) t(t(newdata)) else as.matrix(newdata)

    newnrows <- nrow(newdata)
    newncols <- ncol(newdata)
    ## sanity check: test data must have the same column count as training data
    if(!is(newdata,"kernelMatrix") && !is.null(xmatrix(object))){
      if(is(xmatrix(object),"list") && is(xmatrix(object)[[1]],"matrix")) oldco <- ncol(xmatrix(object)[[1]])
      if(is(xmatrix(object),"matrix")) oldco <- ncol(xmatrix(object))
      if (oldco != newncols) stop ("test vector does not match model !")
    }
  }
  else
    newnrows <- length(newdata)

  p <- 0

  ## apply the scaling used at training time to the test data
  if (is.list(scaling(object)))
    newdata[,scaling(object)$scaled] <-
      scale(newdata[,scaling(object)$scaled, drop = FALSE],
            center = scaling(object)$x.scale$"scaled:center",
            scale  = scaling(object)$x.scale$"scaled:scale")

  if(type == "response" || type =="decision" || type=="votes")
    {
      ## pairwise (one-against-one) multiclass machines
      if(type(object)=="C-svc"||type(object)=="nu-svc"||type(object)=="C-bsvc")
        {
          predres <- 1:newnrows
          if(type=="decision")
            votematrix <- matrix(0,nclass(object)*(nclass(object)-1)/2,newnrows)
          else
            votematrix <- matrix(0,nclass(object),newnrows)

          for(i in 1:(nclass(object)-1))
            {
              jj <- i+1
              for(j in jj:nclass(object))
                {
                  p <- p+1
                  ## decision value of the p-th (i vs j) binary machine
                  if(is(newdata,"kernelMatrix"))
                    ret <- newdata[,which(SVindex(object)%in%alphaindex(object)[[p]]), drop=FALSE] %*% coef(object)[[p]] - b(object)[p]
                  else
                    ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[[p]],coef(object)[[p]]) - b(object)[p]

                  if(type=="decision")
                    votematrix[p,] <- ret
                  else{
                    votematrix[i,ret<0] <- votematrix[i,ret<0] + 1
                    votematrix[j,ret>0] <- votematrix[j,ret>0] + 1
                  }
                }
            }
          if(type == "decision")
            predres <- t(votematrix)
          else
            ## predicted class = class with the most pairwise votes
            predres <- sapply(predres, function(x) which.max(votematrix[,x]))
        }

      ## Crammer & Singer native multiclass machine
      if(type(object) == "spoc-svc")
        {
          predres <- 1:newnrows
          votematrix <- matrix(0,nclass(object),newnrows)
          for(i in 1:nclass(object)){
            if(is(newdata,"kernelMatrix"))
              votematrix[i,] <- newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% coef(object)[[i]]
            else if (is(newdata,"list"))
              votematrix[i,] <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],coef(object)[[i]])
            else
              votematrix[i,] <- kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],coef(object)[[i]])
          }
          predres <- sapply(predres, function(x) which.max(votematrix[,x]))
        }

      ## Weston & Watkins native multiclass machine
      if(type(object) == "kbb-svc")
        {
          predres <- 1:newnrows
          votematrix <- matrix(0,nclass(object),newnrows)
          A <- rowSums(alpha(object))

          for(i in 1:nclass(object))
            {
              for(k in (1:i)[-i])
                if(is(newdata,"kernelMatrix"))
                  votematrix[k,] <- votematrix[k,] - (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% alpha(object)[,k][alphaindex(object)[[i]]] + sum(alpha(object)[,k][alphaindex(object)[[i]]]))
                else if (is(newdata,"list"))
                  votematrix[k,] <- votematrix[k,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],alpha(object)[,k][alphaindex(object)[[i]]]) + sum(alpha(object)[,k][alphaindex(object)[[i]]]))
                else
                  votematrix[k,] <- votematrix[k,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],alpha(object)[,k][alphaindex(object)[[i]]]) + sum(alpha(object)[,k][alphaindex(object)[[i]]]))

              if(is(newdata,"kernelMatrix"))
                votematrix[i,] <- votematrix[i,] + (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% A[alphaindex(object)[[i]]] + sum(A[alphaindex(object)[[i]]]))
              else if (is(newdata,"list"))
                votematrix[i,] <- votematrix[i,] + (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],A[alphaindex(object)[[i]]]) + sum(A[alphaindex(object)[[i]]]))
              else
                votematrix[i,] <- votematrix[i,] + (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],A[alphaindex(object)[[i]]]) + sum(A[alphaindex(object)[[i]]]))

              if(i <= (nclass(object)-1))
                for(kk in i:(nclass(object)-1))
                  if(is(newdata,"kernelMatrix"))
                    votematrix[kk+1,] <- votematrix[kk+1,] - (newdata[,which(SVindex(object)%in%alphaindex(object)[[i]]), drop=FALSE] %*% alpha(object)[,kk][alphaindex(object)[[i]]] + sum(alpha(object)[,kk][alphaindex(object)[[i]]]))
                  else if (is(newdata,"list"))
                    votematrix[kk+1,] <- votematrix[kk+1,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]]],alpha(object)[,kk][alphaindex(object)[[i]]]) + sum(alpha(object)[,kk][alphaindex(object)[[i]]]))
                  else
                    votematrix[kk+1,] <- votematrix[kk+1,] - (kernelMult(kernelf(object),newdata,xmatrix(object)[alphaindex(object)[[i]],,drop=FALSE],alpha(object)[,kk][alphaindex(object)[[i]]]) + sum(alpha(object)[,kk][alphaindex(object)[[i]]]))
            }
          predres <- sapply(predres, function(x) which.max(votematrix[,x]))
        }
    }

  if(type == "probabilities")
    {
      if(is.null(prob.model(object)[[1]]))
        ## NOTE(fix): corrected typo "paramater" -> "parameter" in the message
        stop("ksvm object contains no probability model. Make sure you set the parameter prob.model in ksvm during training.")

      if(type(object)=="C-svc"||type(object)=="nu-svc"||type(object)=="C-bsvc")
        {
          ## pairwise class probabilities via Platt's sigmoid, then coupled
          ## into a single multiclass probability matrix
          binprob <- matrix(0, newnrows, nclass(object)*(nclass(object) - 1)/2)
          for(i in 1:(nclass(object)-1))
            {
              jj <- i+1
              for(j in jj:nclass(object))
                {
                  p <- p+1
                  if(is(newdata,"kernelMatrix"))
                    binprob[,p] <- 1 - .SigmoidPredict(as.vector(newdata[,which(SVindex(object)%in%alphaindex(object)[[p]]), drop=FALSE] %*% coef(object)[[p]] - b(object)[p]), prob.model(object)[[p]]$A, prob.model(object)[[p]]$B)
                  else
                    binprob[,p] <- 1 - .SigmoidPredict(as.vector(kernelMult(kernelf(object),newdata,xmatrix(object)[[p]],coef(object)[[p]]) - b(object)[p]), prob.model(object)[[p]]$A, prob.model(object)[[p]]$B)
                }
            }
          multiprob <- couple(binprob, coupler = coupler)
        }
      else
        stop("probability estimates only supported for C-svc, C-bsvc and nu-svc")
    }

  if(type(object) == "one-svc")
    {
      if(is(newdata,"kernelMatrix"))
        ret <- newdata %*% coef(object) - b(object)
      else
        ret <- kernelMult(kernelf(object),newdata,xmatrix(object),coef(object)) - b(object)
      ##one-class-classification: return TRUE/FALSE (probabilities ?)
      if(type=="decision")
        return(ret)
      else
        {
          ret[ret>0]<-1
          return(ret == 1)
        }
    }
  else {
    if(type(object)=="eps-svr"||type(object)=="nu-svr"||type(object)=="eps-bsvr")
      {
        ## regression: plain decision value
        if(is(newdata,"kernelMatrix"))
          predres <- newdata %*% coef(object) - b(object)
        else
          predres <- kernelMult(kernelf(object),newdata,xmatrix(object),coef(object)) - b(object)
      }
    else {
      ##classification & votes : return votematrix
      if(type == "votes")
        return(votematrix)

      ##classification & probabilities : return probability matrix
      if(type == "probabilities")
        {
          colnames(multiprob) <- lev(object)
          return(multiprob)
        }

      if(is.numeric(lev(object)) && type == "response")
        return(lev(object)[predres])

      if (is.character(lev(object)) && type!="decision")
        {
          ##classification & type response: return factors
          if(type == "response")
            return(factor (lev(object)[predres], levels = lev(object)))
        }
    }
  }

  if (!is.null(scaling(object)$y.scale) & !is(newdata,"kernelMatrix") & !is(newdata,"list"))
    ## return raw values, possibly scaled back
    return(predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center")
  else
    ##else: return raw values
    return(predres)
})

#****************************************************************************************#

## show method: print a human-readable summary of a fitted ksvm model
## (SV type, hyper-parameters, kernel, number of SVs, errors).
setMethod("show","ksvm",
function(object){
  cat("Support Vector Machine object of class \"ksvm\"","\n")
  cat("\n")
  cat(paste("SV type:", type(object)))
  switch(type(object),
         "C-svc" = cat(paste(" (classification)", "\n")),
         "nu-svc" = cat(paste(" (classification)", "\n")),
         "C-bsvc" = cat(paste(" (classification)", "\n")),
         "one-svc" = cat(paste(" (novelty detection)", "\n")),
         "spoc-svc" = cat(paste(" (classification)", "\n")),
         "kbb-svc" = cat(paste(" (classification)", "\n")),
         "eps-svr" = cat(paste(" (regression)","\n")),
         "nu-svr" = cat(paste(" (regression)","\n"))
         )
  switch(type(object),
         "C-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")),
         "nu-svc" = cat(paste(" parameter : nu =", param(object)$nu, "\n")),
         "C-bsvc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")),
         "one-svc" = cat(paste(" parameter : nu =", param(object)$nu, "\n")),
         "spoc-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")),
         "kbb-svc" = cat(paste(" parameter : cost C =",param(object)$C, "\n")),
         "eps-svr" = cat(paste(" parameter : epsilon =",param(object)$epsilon, " cost C =", param(object)$C,"\n")),
         "nu-svr" = cat(paste(" parameter : epsilon =", param(object)$epsilon, " nu =", param(object)$nu,"\n"))
         )
  cat("\n")
  show(kernelf(object))
  cat(paste("\nNumber of Support Vectors :", nSV(object),"\n"))
  cat("\nObjective Function Value :", round(obj(object),4),"\n")
  ## if(type(object)=="C-svc" || type(object) == "nu-svc")
  ##   cat(paste("Margin width :",margin(object),"\n"))
  if(!is.null(fitted(object)))
    cat(paste("Training error :", round(error(object),6),"\n"))
  if(cross(object)!= -1)
    cat("Cross validation error :",round(cross(object),6),"\n")
  if(!is.null(prob.model(object)[[1]])&&(type(object)=="eps-svr" ||type(object)=="nu-svr"||type(object)=="eps-bsvr"))
    cat("Laplace distr. width :",round(prob.model(object)[[1]],6),"\n")
  if(!is.null(prob.model(object)[[1]]) & (type(object) == "C-svc"| type(object) == "nu-svc"| type(object) == "C-bsvc"))
    cat("Probability model included.","\n")
  ##train error & loss
})

## plot method for binary classification ksvm objects: draws the decision
## surface as a filled contour over the first two data dimensions
setMethod("plot", signature(x = "ksvm", y = "missing"),
function(x, data = NULL, grid = 50, slice = list(), ...)
{
  ## only two-class C-svc / nu-svc models can be visualised
  if (type(x) =="C-svc" || type(x) == "nu-svc") {
    if(nclass(x) > 2)
      stop("plot function only supports binary classification")

    if (!is.null(terms(x))&&!is.null(data))
      {
        ## formula interface: rebuild the model matrix from the supplied data
        if(!is.matrix(data))
          sub <- model.matrix(delete.response(terms(x)), as.data.frame(data), na.action = n.action(x))
      }
    else if(!is.null(data))
      sub <- as.matrix(data)
    else
      sub <- xmatrix(x)[[1]]

    ## sub <- sub[,!colnames(xmatrix(x)[[1]])%in%names(slice)]

    ## grid of evaluation points spanning the two plotted dimensions
    xr <- seq(min(sub[,2]), max(sub[,2]), length = grid)
    yr <- seq(min(sub[,1]), max(sub[,1]), length = grid)
    sc <- 0

#    if(is.null(data))
#      {
#        sc <- 1
#        data <- xmatrix(x)[[1]]
#      }

    if(is.data.frame(data) || !is.null(terms(x))){
      lis <- c(list(yr), list(xr), slice)
      names(lis)[1:2] <- setdiff(colnames(sub),names(slice))
      new <- expand.grid(lis)[,labels(terms(x))]
    }
    else
      new <- expand.grid(xr,yr)

    if(sc== 1)
      scaling(x) <- NULL

    ## decision values on the grid define the contour surface
    preds <- predict(x, new ,type = "decision")

    if(is.null(terms(x)))
      xylb <- colnames(sub)
    else
      xylb <- names(lis)
    lvl <- 37

    mymax <- max(abs(preds))
    mylevels <- pretty(c(0, mymax), 15)
    nl <- length(mylevels)-2

    ## diverging red/blue HCL palette centred on decision value 0
    mycols <- c(hcl(0, 100 * (nl:0/nl)^1.3, 90 - 40 *(nl:0/nl)^1.3),
                rev(hcl(260, 100 * (nl:0/nl)^1.3, 90 - 40 *(nl:0/nl)^1.3)))

    mylevels <- c(-rev(mylevels[-1]), mylevels)
    ## keep only the levels/colours actually covered by the predictions
    index <- max(which(mylevels < min(preds))):min(which(mylevels > max(preds)))
    mycols <- mycols[index]
    mylevels <- mylevels[index]

    #FIXME# previously the plot code assumed that the y values are either
    #FIXME# -1 or 1, but this is not generally true. If generated from a
    #FIXME# factor, they are typically 1 and 2. Maybe ymatrix should be
    #FIXME# changed?
    ymat <- ymatrix(x)
    ymean <- mean(unique(ymat))
    ## draw the decision surface; support vectors get filled plot symbols
    filled.contour(xr, yr, matrix(as.numeric(preds), nrow = length(xr), byrow = TRUE),
                   col = mycols, levels = mylevels,
                   plot.axes = {
                     axis(1)
                     axis(2)
                     if(!is.null(data)){
                       points(sub[-SVindex(x),2], sub[-SVindex(x),1],
                              pch = ifelse(ymat[-SVindex(x)] < ymean, 2, 1))
                       points(sub[SVindex(x),2], sub[SVindex(x),1],
                              pch = ifelse(ymat[SVindex(x)] < ymean, 17, 16))}
                     else{
                       ## points(sub[-SVindex(x),], pch = ifelse(ymat[-SVindex(x)] < ymean, 2, 1))
                       points(sub, pch = ifelse(ymat[SVindex(x)] < ymean, 17, 16))
                     }},
                   nlevels = lvl,
                   plot.title = title(main = "SVM classification plot", xlab = xylb[2], ylab = xylb[1]),
                   ...
                   )
  } else {
    stop("Only plots of classification ksvm objects supported")
  }
})

setGeneric(".probPlatt", function(deci, yres) standardGeneric(".probPlatt"))

## Platt scaling: fit the sigmoid P(y=1|f) = 1/(1 + exp(A*f + B)) to the
## decision values deci with labels yres (sign encodes the class), by
## Newton iterations with a backtracking line search.
## Returns list(A=..., B=...) with the fitted sigmoid parameters.
setMethod(".probPlatt",signature(deci="ANY"),
function(deci,yres)
{
  if (is.matrix(deci))
    deci <- as.vector(deci)
  if (!is.vector(deci))
    stop("input should be matrix or vector")
  yres <- as.vector(yres)
  ## Create label and count priors
  boolabel <- yres >= 0
  prior1 <- sum(boolabel)
  m <- length(yres)
  prior0 <- m - prior1

  ## set parameters (should be on the interface I guess)
  maxiter <- 100
  minstep <- 1e-10
  sigma <- 1e-3
  eps <- 1e-5

  ## Construct target support
  hiTarget <- (prior1 + 1)/(prior1 + 2)
  loTarget <- 1/(prior0 + 2)
  length <- prior1 + prior0
  t <- rep(loTarget, m)
  t[boolabel] <- hiTarget

  ##Initial Point & Initial Fun Value
  A <- 0
  B <- log((prior0 + 1)/(prior1 + 1))
  fval <- 0

  fApB <- deci*A + B
  bindex <- fApB >= 0
  p <- q <- rep(0,m)

  ## negative log-likelihood, computed branch-wise for numerical stability
  fval <- sum(t[bindex]*fApB[bindex] + log(1 + exp(-fApB[bindex])))
  fval <- fval + sum((t[!bindex] - 1)*fApB[!bindex] + log(1+exp(fApB[!bindex])))

  for (it in 1:maxiter)
    {
      h11 <- h22 <- sigma
      h21 <- g1 <- g2 <- 0

      fApB <- deci*A + B

      bindex <- fApB >= 0
      p[bindex] <- exp(-fApB[bindex])/(1 + exp(-fApB[bindex]))
      q[bindex] <- 1/(1+exp(-fApB[bindex]))
      bindex <- fApB < 0
      p[bindex] <- 1/(1 + exp(fApB[bindex]))
      q[bindex] <- exp(fApB[bindex])/(1 +
exp(fApB[bindex]))

      d2 <- p*q
      ## accumulate gradient (g1, g2) and Hessian (h11, h21, h22) terms
      h11 <- h11 + sum(d2*deci^2)
      h22 <- h22 + sum(d2)
      h21 <- h21 + sum(deci*d2)
      d1 <- t - p
      g1 <- g1 + sum(deci*d1)
      g2 <- g2 + sum(d1)

      ## Stopping Criteria
      if (abs(g1) < eps && abs(g2) < eps)
        break

      ## Finding Newton Direction -inv(t(H))%*%g
      det <- h11*h22 - h21^2
      dA <- -(h22*g1 - h21*g2) / det
      dB <- -(-h21*g1 + h11*g2) / det
      gd <- g1*dA + g2*dB

      ## Line Search
      stepsize <- 1

      while(stepsize >= minstep)
        {
          newA <- A + stepsize * dA
          newB <- B + stepsize * dB

          ## New function value
          newf <- 0
          fApB <- deci * newA + newB
          bindex <- fApB >= 0
          newf <- sum(t[bindex] * fApB[bindex] + log(1 + exp(-fApB[bindex])))
          newf <- newf + sum((t[!bindex] - 1)*fApB[!bindex] + log(1 + exp(fApB[!bindex])))

          ## Check decrease
          if (newf < (fval + 0.0001 * stepsize * gd))
            {
              A <- newA
              B <- newB
              fval <- newf
              break
            }
          else
            stepsize <- stepsize/2
        }

      ## line search failed: return sigmoid values with current parameters
      if (stepsize < minstep)
        {
          cat("line search fails", A, B, g1, g2, dA, dB, gd)
          ret <- .SigmoidPredict(deci, A, B)
          return(ret)
        }
    }

  if(it >= maxiter -1)
    cat("maximum number of iterations reached",g1,g2)

  ret <- list(A=A, B=B)
  return(ret)
})

## Sigmoid predict function
## Evaluates the fitted Platt sigmoid 1/(1 + exp(A*deci + B)) branch-wise
## for numerical stability (separate positive/negative exponent cases).
.SigmoidPredict <- function(deci, A, B)
{
  fApB <- deci*A +B
  k <- length(deci)
  ret <- rep(0,k)
  bindex <- fApB >= 0
  ret[bindex] <- exp(-fApB[bindex])/(1 + exp(-fApB[bindex]))
  ret[!bindex] <- 1/(1 + exp(fApB[!bindex]))
  return(ret)
}
kernlab/R/ipop.R0000644000175100001440000002544511304023134013202 0ustar hornikusers##ipop solves the quadratic programming problem
##minimize c' * primal + 1/2 primal' * H * primal
##subject to b <= A*primal <= b + r
## l <= x <= u
## d is the optimizer itself
##returns primal and dual variables (i.e. x and the Lagrange
##multipliers for b <= A * primal <= b + r)
##for additional documentation see
## R. Vanderbei
## LOQO: an Interior Point Code for Quadratic Programming, 1992
## Author: R version Alexandros Karatzoglou, orig. matlab Alex J. Smola
## Created: 12/12/97
## R Version: 12/08/03
## Updated: 13/10/05
## This code is released under the GNU Public License

setGeneric("ipop",function(c, H, A, b, l, u, r, sigf=7, maxiter=40, margin=0.05, bound=10, verb=0) standardGeneric("ipop"))
## Interior point QP solver (LOQO style primal-dual predictor-corrector).
## A non-square H triggers the Sherman-Morrison-Woodbury (smw) branch,
## where H is treated as a low-rank factor rather than the full matrix.
setMethod("ipop",signature(H="matrix"),
function(c, H, A, b, l, u, r, sigf=7, maxiter=40, margin=0.05, bound=10, verb=0)
{
  ## basic argument checks
  if(!is.matrix(H)) stop("H must be a matrix")
  if(!is.matrix(A)&&!is.vector(A)) stop("A must be a matrix or a vector")
  if(!is.matrix(c)&&!is.vector(c)) stop("c must be a matrix or a vector")
  if(!is.matrix(l)&&!is.vector(l)) stop("l must be a matrix or a vector")
  if(!is.matrix(u)&&!is.vector(u)) stop("u must be a matrix or a vector")

  n <- dim(H)[1]

  ## check for a decomposed H matrix
  if(n == dim(H)[2])
    smw <- 0
  if(n > dim(H)[2])
    smw <- 1
  if(n < dim(H)[2])
    {
      smw <- 1
      n <- dim(H)[2]
      H <- t(H)
    }

  if (is.vector(A)) A <- matrix(A,1)
  m <- dim(A)[1]
  primal <- rep(0,n)
  if (missing(b))
    bvec <- rep(0, m)
  ## if(n !=nrow(H))
  ##   stop("H matrix is not symmetric")
  if (n != length(c))
    stop("H and c are incompatible!")
  if (n != ncol(A))
    stop("A and c are incompatible!")
  if (m != length(b))
    stop("A and b are incompatible!")
  if(n !=length(u))
    stop("u is incopatible with H")
  if(n !=length(l))
    stop("l is incopatible with H")

  c <- matrix(c)
  l <- matrix(l)
  u <- matrix(u)

  m <- nrow(A)
  n <- ncol(A)
  H.diag <- diag(H)
  if(smw == 0)
    H.x <- H
  else if (smw == 1)
    H.x <- t(H)
  b.plus.1 <- max(svd(b)$d) + 1
  c.plus.1 <- max(svd(c)$d) + 1
  one.x <- -matrix(1,n,1)
  one.y <- -matrix(1,m,1)

  ## starting point
  if(smw == 0)
    diag(H.x) <- H.diag + 1
  else
    smwn <- dim(H)[2]
  H.y <- diag(1,m)
  c.x <- c
  c.y <- b
  ## solve the system [-H.x A' A H.y] [x, y] = [c.x c.y]
  if(smw == 0)
    {
      AP <- matrix(0,m+n,m+n)
      xp <- 1:(m+n) <= n
      AP[xp,xp] <- -H.x
      AP[xp == FALSE,xp] <- A
      AP[xp,xp == FALSE] <- t(A)
      AP[xp == FALSE, xp== FALSE] <- H.y
      s.tmp <- solve(AP,c(c.x,c.y))
      x <- s.tmp[1:n]
      y <- s.tmp[-(1:n)]
    }
  else
    {
      ## low-rank case: solve via the Sherman-Morrison-Woodbury identity
      V <- diag(smwn)
      smwinner <- chol(V + crossprod(H))
      smwa1 <- t(A)
      smwc1 <- c.x
      smwa2 <- smwa1 - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwa1))))
      smwc2 <- smwc1 - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1))))
      y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2)
      x <- smwa2 %*% y - smwc2
    }

  ## initialise slack/dual variables away from zero
  g <- pmax(abs(x - l), bound)
  z <- pmax(abs(x), bound)
  t <- pmax(abs(u - x), bound)
  s <- pmax(abs(x), bound)
  v <- pmax(abs(y), bound)
  w <- pmax(abs(y), bound)
  p <- pmax(abs(r - w), bound)
  q <- pmax(abs(y), bound)
  mu <- as.vector(crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n))
  sigfig <- 0
  counter <- 0
  alfa <- 1
  if (verb > 0)	# print at least one status report
    cat("Iter PrimalInf DualInf SigFigs Rescale PrimalObj DualObj","\n")

  while (counter < maxiter)
    {
      ## update the iteration counter
      counter <- counter + 1

      ## central path (predictor)
      if(smw == 0)
        H.dot.x <- H %*% x
      else if (smw == 1)
        H.dot.x <- H %*% crossprod(H,x)
      rho <- b - A %*% x + w
      nu <- l - x + g
      tau <- u - x - t
      alpha <- r - w - p
      sigma <- c - crossprod(A, y) - z + s + H.dot.x
      beta <- y + q - v
      gamma.z <- - z
      gamma.w <- - w
      gamma.s <- - s
      gamma.q <- - q

      ## instrumentation
      x.dot.H.dot.x <- crossprod(x, H.dot.x)
      primal.infeasibility <- max(svd(rbind(rho, tau, matrix(alpha), nu))$d)/ b.plus.1
      dual.infeasibility <- max(svd(rbind(sigma,t(t(beta))))$d) / c.plus.1
      primal.obj <- crossprod(c,x) + 0.5 * x.dot.H.dot.x
      dual.obj <- crossprod(b,y) - 0.5 * x.dot.H.dot.x + crossprod(l, z) - crossprod(u,s) - crossprod(r,q)
      old.sigfig <- sigfig
      ## significant figures of agreement between primal and dual objective
      sigfig <- max(-log10(abs(primal.obj - dual.obj)/(abs(primal.obj) + 1)), 0)
      if (sigfig >= sigf) break
      if (verb > 0)			# final report
        cat( counter, "\t", signif(primal.infeasibility,6), signif(dual.infeasibility,6), sigfig, alfa, primal.obj, dual.obj,"\n")

      ## some more intermediate variables (the hat section)
      hat.beta <- beta - v * gamma.w / w
      hat.alpha <- alpha - p * gamma.q / q
      hat.nu <- nu + g * gamma.z / z
      hat.tau <- tau - t * gamma.s / s

      ## the diagonal terms
      d <- z / g + s / t
      e <- 1 / (v / w + q / p)

      ## initialization before the big cholesky
      if (smw == 0)
        diag(H.x) <- H.diag + d
      diag(H.y) <- e
      c.x <- sigma - z * hat.nu / g - s * hat.tau / t
      c.y <- rho - e * (hat.beta - q * hat.alpha / p)

      ## and solve the system [-H.x A' A H.y] [delta.x, delta.y] <- [c.x c.y]
      if(smw == 0){
        AP[xp,xp] <- -H.x
        AP[xp == FALSE, xp== FALSE] <- H.y
        s1.tmp <- solve(AP,c(c.x,c.y))
        delta.x<-s1.tmp[1:n] ; delta.y <- s1.tmp[-(1:n)]
      }
      else
        {
          V <- diag(smwn)
          smwinner <- chol(V + chunkmult(t(H),2000,d))
          smwa1 <- t(A)
          smwa1 <- smwa1 / d
          smwc1 <- c.x / d
          smwa2 <- t(A) - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwa1))))
          smwa2 <- smwa2 / d
          smwc2 <- (c.x - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1)))))/d
          delta.y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2)
          delta.x <- smwa2 %*% delta.y - smwc2
        }

      ## backsubstitution
      delta.w <- - e * (hat.beta - q * hat.alpha / p + delta.y)
      delta.s <- s * (delta.x - hat.tau) / t
      delta.z <- z * (hat.nu - delta.x) / g
      delta.q <- q * (delta.w - hat.alpha) / p
      delta.v <- v * (gamma.w - delta.w) / w
      delta.p <- p * (gamma.q - delta.q) / q
      delta.g <- g * (gamma.z - delta.z) / z
      delta.t <- t * (gamma.s - delta.s) / s

      ## compute update step now (sebastian's trick)
      alfa <- - (1 - margin) / min(c(delta.g / g, delta.w / w, delta.t / t, delta.p / p, delta.z / z, delta.v / v, delta.s / s, delta.q / q, -1))
      newmu <- (crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n))
      newmu <- mu * ((alfa - 1) / (alfa + 10))^2
      ## corrector right-hand sides
      gamma.z <- mu / g - z - delta.z * delta.g / g
      gamma.w <- mu / v - w - delta.w * delta.v / v
      gamma.s <- mu / t - s - delta.s * delta.t / t
      gamma.q <- mu / p - q - delta.q * delta.p / p

      ## some more intermediate variables (the hat section)
      hat.beta <- beta - v * gamma.w / w
      hat.alpha <- alpha - p * gamma.q / q
      hat.nu <- nu + g * gamma.z / z
      hat.tau <- tau - t * gamma.s / s

      ## initialization before the big cholesky
      ##for ( i in 1 : n H.x(i,i) <- H.diag(i) + d(i) ) {
      ##H.y <- diag(e)
      c.x <- sigma - z * hat.nu / g - s * hat.tau / t
      c.y <- rho - e * (hat.beta - q * hat.alpha / p)

      ## and solve the system [-H.x A' A H.y] [delta.x, delta.y] <- [c.x c.y]
      if (smw == 0)
        {
          AP[xp,xp] <- -H.x
          AP[xp == FALSE, xp== FALSE] <- H.y
          s1.tmp <- solve(AP,c(c.x,c.y))
          delta.x<-s1.tmp[1:n] ; delta.y<-s1.tmp[-(1:n)]
        }
      else if (smw == 1)
        {
          smwc1 <- c.x / d
          smwc2 <- (c.x - (H %*% solve(smwinner,solve(t(smwinner),crossprod(H,smwc1))))) / d
          delta.y <- solve(A %*% smwa2 + H.y , c.y + A %*% smwc2)
          delta.x <- smwa2 %*% delta.y - smwc2
        }

      ## backsubstitution
      delta.w <- - e * (hat.beta - q * hat.alpha / p + delta.y)
      delta.s <- s * (delta.x - hat.tau) / t
      delta.z <- z * (hat.nu - delta.x) / g
      delta.q <- q * (delta.w - hat.alpha) / p
      delta.v <- v * (gamma.w - delta.w) / w
      delta.p <- p * (gamma.q - delta.q) / q
      delta.g <- g * (gamma.z - delta.z) / z
      delta.t <- t * (gamma.s - delta.s) / s

      ## compute the updates
      alfa <- - (1 - margin) / min(c(delta.g / g, delta.w / w, delta.t / t, delta.p / p, delta.z / z, delta.v / v, delta.s / s, delta.q / q, -1))

      x <- x + delta.x * alfa
      g <- g + delta.g * alfa
      w <- w + delta.w * alfa
      t <- t + delta.t * alfa
      p <- p + delta.p * alfa
      y <- y + delta.y * alfa
      z <- z + delta.z * alfa
      v <- v + delta.v * alfa
      s <- s + delta.s * alfa
      q <- q + delta.q * alfa

      ## these two lines put back in ?
      ## mu <- (crossprod(z,g) + crossprod(v,w) + crossprod(s,t) + crossprod(p,q))/(2 * (m + n))
      ## mu <- mu * ((alfa - 1) / (alfa + 10))^2
      mu <- newmu
    }

  if (verb > 0)		## final report
    cat( counter, primal.infeasibility, dual.infeasibility, sigfig, alfa, primal.obj, dual.obj)

  ret <- new("ipop")		## repackage the results
  primal(ret) <- x
  dual(ret) <- drop(y)
  if ((sigfig > sigf) & (counter < maxiter))
    how(ret) <- 'converged'
  else
    {				## must have run out of counts
      if ((primal.infeasibility > 10e5) & (dual.infeasibility > 10e5))
        how(ret) <- 'primal and dual infeasible'
      if (primal.infeasibility > 10e5)
        how(ret) <- 'primal infeasible'
      if (dual.infeasibility > 10e5)
        how(ret) <- 'dual infeasible'
      else			## don't really know
        how(ret) <- 'slow convergence, change bound?'
    }
  ret
})

setGeneric("chunkmult",function(Z, csize, colscale) standardGeneric("chunkmult"))
## chunkmult: accumulate crossprod of the column-scaled transpose of Z in
## chunks of csize columns, to limit peak memory use (helper for the SMW
## branch of ipop above).
setMethod("chunkmult",signature(Z="matrix"),
function(Z, csize, colscale)
{
  n <- dim(Z)[1]
  m <- dim(Z)[2]
  d <- sqrt(colscale)
  nchunks <- ceiling(m/csize)
  res <- matrix(0,n,n)

  for( i in 1:nchunks)
    {
      lowerb <- (i - 1) * csize + 1
      upperb <- min(i * csize, m)
      buffer <- t(Z[,lowerb:upperb,drop = FALSE])
      bufferd <- d[lowerb:upperb]
      buffer <- buffer / bufferd
      res <- res + crossprod(buffer)
    }
  return(res)
})
kernlab/R/rvm.R0000644000175100001440000004145712676465015013055 0ustar hornikusers## relevance vector machine
## author : alexandros

setGeneric("rvm", function(x, ...) standardGeneric("rvm"))

## formula interface: build the model frame / model matrix from the formula
## and dispatch to the matrix method
setMethod("rvm",signature(x="formula"),
function (x, data=NULL, ..., subset, na.action = na.omit){
  cl <- match.call()
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, parent.frame())))
    m$data <- as.data.frame(data)
  m$... <- NULL
  m$formula <- m$x
  m$x <- NULL
  m[[1L]] <- quote(stats::model.frame)
  m <- eval(m, parent.frame())
  Terms <- attr(m, "terms")
  attr(Terms, "intercept") <- 0
  x <- model.matrix(Terms, m)
  y <- model.extract(m, "response")
  ret <- rvm(x, y, ...)
kcall(ret) <- cl terms(ret) <- Terms if (!is.null(attr(m, "na.action"))) n.action(ret) <- attr(m, "na.action") return (ret) }) setMethod("rvm",signature(x="vector"), function(x,...) { x <- t(t(x)) ret <- rvm(x, ...) ret }) setMethod("rvm",signature(x="list"), function (x, y, type = "regression", kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 5, var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... ,subset ,na.action = na.omit) { if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") K <- kernelMatrix(kernel,x) ret <- rvm(x=K, y=y, kernel=kernel, alpha = alpha, var= var, var.fix = var.fix, iterations = iterations, verbosity = verbosity, tol = tol, minmaxdiff=minmaxdiff,cross=cross,fit=fit, na.action=na.action) kernelf(ret) <- kernel xmatrix(ret) <- x return(ret) }) setMethod("rvm",signature(x="matrix"), function (x, y, type = "regression", kernel = "rbfdot", kpar = "automatic", alpha = ncol(as.matrix(x)), var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... 
,subset ,na.action = na.omit) { ## subsetting and na-handling for matrices ret <- new("rvm") if (!missing(subset)) x <- x[subset,] if (is.null(y)) x <- na.action(x) else { df <- na.action(data.frame(y, x)) y <- df[,1] x <- as.matrix(df[,-1]) } ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- "regression" # in case of classification: transform factors into integers if (is.factor(y)) { stop("classification not supported with rvm, you can use ksvm(), lssvm() or gausspr()") } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("classification not supported with rvm, you can use ksvm(), lssvm() or gausspr()") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(is.character(kernel)){ kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot","matrix")) if(kernel == "matrix") if(dim(x)[1]==dim(x)[2]) return(rvm(as.kernelMatrix(x), y = y,type = type, alpha = alpha, var = var, # variance var.fix = var.fix, # fixed variance? iterations = iterations, # no. 
of iterations verbosity = verbosity, tol = tol, minmaxdiff = minmaxdiff, cross = cross, fit = fit ,subset ,na.action = na.omit, ...)) else stop(" kernel matrix not square!") if(is.character(kpar)) if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" ) { cat (" Setting default kernel parameters ","\n") kpar <- list() } } if (!is.function(kernel)) if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){ kp <- match.arg(kpar,"automatic") if(kp=="automatic") kpar <- list(sigma=mean(sigest(x,scaled=FALSE)[c(1,3)])) cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n") } if(!is(kernel,"kernel")) { if(is(kernel,"function")) kernel <- deparse(substitute(kernel)) kernel <- do.call(kernel, kpar) } if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'") if(length(alpha) == m) thetavec <- 1/alpha else if (length(alpha) == 1) thetavec <- rep(1/alpha, m) else stop("length of initial alpha vector is wrong (has to be one or equal with number of train data") wvec <- rep(1, m) piter <- iterations*0.4 if (type(ret) == "regression") { K <- kernelMatrix(kernel, x) diag(K) <- diag(K)+ 10e-7 Kml <- crossprod(K, y) for (i in 1:iterations) { nzindex <- thetavec > tol thetavec [!nzindex] <- wvec [!nzindex] <- 0 Kr <- K [ ,nzindex, drop = FALSE] thetatmp <- thetavec[nzindex] n <- sum (nzindex) Rinv <- backsolve(chol(crossprod(Kr)/var + diag(1/thetatmp)),diag(1,n)) ## compute the new wvec coefficients wvec [nzindex] <- (Rinv %*% (crossprod(Rinv, Kml [nzindex])))/var diagSigma <- rowSums(Rinv^2) ## error err <- sum ((y - Kr %*% wvec [nzindex])^2) if(var < 2e-9) { warning("Model might be overfitted") break } ## log some information if (verbosity > 0) { log.det.Sigma.inv <- - 2 * sum (log (diag (Rinv))) ## compute the marginal likelihood to monitor 
convergence mlike <- -1/2 * (log.det.Sigma.inv + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp)) cat ("Marg. Likelihood =", formatC (mlike), "\tnRV=", n, "\tvar=", var, "\n") } ## compute zeta zeta <- 1 - diagSigma / thetatmp ## compute logtheta for convergence checking logtheta <- - log(thetavec[nzindex]) ## update thetavec if(i < piter){ thetavec [nzindex] <- wvec [nzindex]^2 / zeta thetavec [thetavec <= 0] <- 0 } else{ thetavec [nzindex] <- (wvec [nzindex]^2/zeta - diagSigma)/zeta thetavec [thetavec <= 0] <- 0 } ## Stop if largest alpha change is too small maxdiff <- max(abs(logtheta[thetavec[which(nzindex)]!=0] + log(thetavec[thetavec!=0]))) if(maxdiff < minmaxdiff) break; ## update variance if (!var.fix) { var <- err / (m - sum (zeta)) } } if(verbosity == 0) mlike(ret) <- drop(-1/2 * (-2*sum(log(diag(Rinv))) + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp))) nvar(ret) <- var error(ret) <- sqrt(err/m) if(fit) fitted(ret) <- Kr %*% wvec [nzindex] } if(type(ret)=="classification") { stop("classification with the relevance vector machine not implemented yet") } kcall(ret) <- match.call() kernelf(ret) <- kernel alpha(ret) <- wvec[nzindex] tol(ret) <- tol xmatrix(ret) <- x ymatrix(ret) <- y RVindex(ret) <- which(nzindex) nRV(ret) <- length(RVindex(ret)) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross!=0) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="classification") { cret <- rvm(x[cind,],factor (lev(ret)[y[cind]], levels = lev(ret)),type=type(ret),kernel=kernel,alpha = alpha,var = var, var.fix=var.fix, tol=tol, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- (1 
- .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="regression") { cret <- rvm(x[cind,],y[cind],type=type(ret),kernel=kernel,tol=tol,alpha = alpha, var = var, var.fix=var.fix, cross = 0, fit = FALSE) cres <- predict(cret, x[vgr[[i]],]) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m) + cerror } } cross(ret) <- cerror } return(ret) }) setMethod("rvm",signature(x="kernelMatrix"), function (x, y, type = "regression", alpha = ncol(as.matrix(x)), var = 0.1, # variance var.fix = FALSE, # fixed variance? iterations = 100, # no. of iterations verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... ,subset ) { ## subsetting and na-handling for matrices ret <- new("rvm") if (!missing(subset)) x <- as.kernelMatrix(x[subset,subset]) if (is.null(y)) stop("response y missing") ncols <- ncol(x) m <- nrows <- nrow(x) if (is.null (type)) type(ret) <- if (is.factor(y)) "classification" else "regression" else type(ret) <- "regression" # in case of classification: transform factors into integers if (is.factor(y)) { stop("Claasification is not implemented, you can use ksvm(), gausspr() or lssvm()") } else { if (type(ret) == "classification" && any(as.integer (y) != y)) stop ("dependent variable has to be of factor or integer type for classification mode.") if(type(ret) == "classification") lev(ret) <- unique (y) } # initialize nclass(ret) <- length (lev(ret)) if(!is.null(type)) type(ret) <- match.arg(type,c("classification", "regression")) if(length(alpha) == m) thetavec <- 1/alpha else if (length(alpha) == 1) thetavec <- rep(1/alpha, m) else stop("length of initial alpha vector is wrong (has to be one or equal with number of train data") wvec <- rep(1, m) piter <- iterations*0.4 if (type(ret) == "regression") { Kml <- crossprod(x, y) for (i in 1:iterations) { nzindex <- thetavec > tol thetavec [!nzindex] <- wvec [!nzindex] <- 0 Kr <- x [ ,nzindex, drop = FALSE] thetatmp <- thetavec[nzindex] n <- sum (nzindex) Rinv <- 
backsolve(chol(crossprod(Kr)/var + diag(1/thetatmp)),diag(1,n)) ## compute the new wvec coefficients wvec [nzindex] <- (Rinv %*% (crossprod(Rinv, Kml [nzindex])))/var diagSigma <- rowSums(Rinv^2) ## error err <- sum ((y - Kr %*% wvec [nzindex])^2) if(var < 2e-9) { warning("Model might be overfitted") break } ## log some information if (verbosity > 0) { log.det.Sigma.inv <- - 2 * sum (log (diag (Rinv))) ## compute the marginal likelihood to monitor convergence mlike <- -1/2 * (log.det.Sigma.inv + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp)) cat ("Marg. Likelihood =", formatC (mlike), "\tnRV=", n, "\tvar=", var, "\n") } ## compute zeta zeta <- 1 - diagSigma / thetatmp ## compute logtheta for convergence checking logtheta <- - log(thetavec[nzindex]) ## update thetavec if(i < piter){ thetavec [nzindex] <- wvec [nzindex]^2 / zeta thetavec [thetavec <= 0] <- 0 } else{ thetavec [nzindex] <- (wvec [nzindex]^2/zeta - diagSigma)/zeta thetavec [thetavec <= 0] <- 0 } ## Stop if largest alpha change is too small maxdiff <- max(abs(logtheta[thetavec[which(nzindex)]!=0] + log(thetavec[thetavec!=0]))) if(maxdiff < minmaxdiff) break; ## update variance if (!var.fix) { var <- err / (m - sum (zeta)) } } if(verbosity == 0) mlike(ret) <- drop(-1/2 * (-2*sum(log(diag(Rinv))) + sum (log (thetatmp)) + m * log (var) + 1/var * err + (wvec [nzindex]^2) %*% (1/thetatmp))) nvar(ret) <- var error(ret) <- sqrt(err/m) if(fit) fitted(ret) <- Kr %*% wvec [nzindex] } if(type(ret)=="classification") { stop("classification with the relevance vector machine not implemented yet") } kcall(ret) <- match.call() kernelf(ret) <- " Kernel Matrix used. 
\n" coef(ret) <- alpha(ret) <- wvec[nzindex] tol(ret) <- tol xmatrix(ret) <- x ymatrix(ret) <- y RVindex(ret) <- which(nzindex) nRV(ret) <- length(RVindex(ret)) if (fit){ if(type(ret)=="classification") error(ret) <- 1 - .classAgreement(table(y,as.integer(fitted(ret)))) if(type(ret)=="regression") error(ret) <- drop(crossprod(fitted(ret) - y)/m) } cross(ret) <- -1 if(cross!=0) { cerror <- 0 suppressWarnings(vgr<-split(sample(1:m,m),1:cross)) for(i in 1:cross) { cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length))))) if(type(ret)=="classification") { cret <- rvm(as.kernelMatrix(x[cind,cind]),factor (lev(ret)[y[cind]], levels = lev(ret)),type=type(ret),alpha = alpha,var = var, var.fix=var.fix, tol=tol, cross = 0, fit = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind][,RVindex(cret),drop=FALSE])) cerror <- (1 - .classAgreement(table(y[vgr[[i]]],as.integer(cres))))/cross + cerror } if(type(ret)=="regression") { cret <- rvm(as.kernelMatrix(x[cind,cind]),y[cind],type=type(ret),tol=tol,alpha = alpha, var = var, var.fix=var.fix, cross = 0, fit = FALSE) cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind][,RVindex(cret),drop=FALSE])) cerror <- drop(crossprod(cres - y[vgr[[i]]])/m)/cross + cerror } } cross(ret) <- cerror } return(ret) }) setMethod("predict", signature(object = "rvm"), function (object, newdata, ...) 
{ if (missing(newdata)) return(fitted(object)) if(!is(newdata,"kernelMatrix") && !is(newdata,"list")){ ncols <- ncol(xmatrix(object)) nrows <- nrow(xmatrix(object)) oldco <- ncols if (!is.null(terms(object))) { newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action) } else newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata) newcols <- 0 newnrows <- nrow(newdata) newncols <- ncol(newdata) newco <- newncols if (oldco != newco) stop ("test vector does not match model !") p<-0 } if(type(object) == "regression") { if(is(newdata,"kernelMatrix")) ret <- newdata %*% coef(object) - b(object) if(is(newdata,"list")) ret <- kernelMult(kernelf(object),newdata,xmatrix(object)[RVindex(object)],alpha(object)) else ret <- kernelMult(kernelf(object),newdata,as.matrix(xmatrix(object)[RVindex(object),,drop=FALSE]),alpha(object)) } ret }) setMethod("show","rvm", function(object){ cat("Relevance Vector Machine object of class \"rvm\"","\n") cat("Problem type: regression","\n","\n") show(kernelf(object)) cat(paste("\nNumber of Relevance Vectors :", nRV(object),"\n")) cat("Variance : ",round(nvar(object),9)) cat("\n") if(!is.null(fitted(object))) cat(paste("Training error :", round(error(object),9),"\n")) if(cross(object)!= -1) cat("Cross validation error :",round(cross(object),9),"\n") ##train error & loss }) kernlab/vignettes/0000755000175100001440000000000012774400037013722 5ustar hornikuserskernlab/vignettes/jss.bib0000644000175100001440000003410612055335060015176 0ustar hornikusers@Article{kernlab:Karatzoglou+Smola+Hornik:2004, author = {Alexandros Karatzoglou and Alex Smola and Kurt Hornik and Achim Zeileis}, title = {kernlab -- An \proglang{S4} Package for Kernel Methods in \proglang{R}}, year = {2004}, journal = {Journal of Statistical Software}, volume = {11}, number = {9}, pages = {1--20}, url = {http://www.jstatsoft.org/v11/i09/} } @Book{kernlab:Schoelkopf+Smola:2002, author = {Bernhard Sch\"olkopf and Alex 
Smola}, title = {Learning with Kernels}, publisher = {MIT Press}, year = 2002, } @Book{kernlab:Chambers:1998, Author = {John M. Chambers}, title = {Programming with Data}, Publisher = {Springer, New York}, Year = 1998, note = {ISBN 0-387-98503-4}, } @Book{kernlab:Hastie:2001, author = {T. Hastie and R. Tibshirani and J. H. Friedman}, title = {The Elements of Statistical Learning}, publisher = {Springer}, Year = 2001, } @Article{kernlab:Vanderbei:1999, author = {Robert Vanderbei}, title = {{LOQO}: An Interior Point Code for Quadratic Programming}, journal = {Optimization Methods and Software}, year = 1999, volume = 12, pages = {251--484}, url = {http://www.sor.princeton.edu/~rvdb/ps/loqo6.pdf}, } @Misc{kernlab:Leisch+Dimitriadou, author = {Fiedrich Leisch and Evgenia Dimitriadou}, title = {\pkg{mlbench}---{A} Collection for Artificial and Real-world Machine Learning Benchmarking Problems}, howpublished = {\textsf{R} package, Version 0.5-6}, note = {Available from \url{http://CRAN.R-project.org}}, year = 2001, month = 12, } @Misc{kernlab:Roever:2004, author = {Christian Roever and Nils Raabe and Karsten Luebke and Uwe Ligges}, title = { \pkg{klaR} -- Classification and Visualization}, howpublished = {\textsf{R} package, Version 0.3-3}, note = {Available from \url{http://cran.R-project.org}}, year = 2004, month = 7, } @Article{kernlab:Hsu+Lin:2002, author = {C.-W. Hsu and Chih-Jen Lin}, title = {A Comparison of Methods for Multi-class Support Vector Machines}, journal = {IEEE Transactions on Neural Networks}, year = 2002, volume = 13, pages = {415--425}, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz}, } @Misc{kernlab:Chang+Lin:2001, author = {Chih-Chung Chang and Chih-Jen Lin}, title = {{LIBSVM}: A Library for Support Vector Machines}, note = {Software available at \url{http://www.csie.ntu.edu.tw/~cjlin/libsvm}}, year = 2001, } @Article{kernlab:Platt:2000, Author = {J. C. 
Platt}, Title = {Probabilistic Outputs for Support Vector Machines and Comparison to Regularized Likelihood Methods}, Journal = {Advances in Large Margin Classifiers, A. Smola, P. Bartlett, B. Sch\"olkopf and D. Schuurmans, Eds.}, Year = 2000, publisher = {Cambridge, MA: MIT Press}, url = {http://citeseer.nj.nec.com/platt99probabilistic.html}, } @Article{kernlab:Platt:1998, Author = {J. C. Platt}, Title = {Probabilistic Outputs for Support Vector Machines and Comparison to Regularized Likelihood Methods}, Journal = {B. Sch\"olkopf, C. J. C. Burges, A. J. Smola, editors, Advances in Kernel Methods --- Support Vector Learning}, Year = 1998, publisher = {Cambridge, MA: MIT Press}, url = {http://research.microsoft.com/~jplatt/abstracts/smo.html}, } @Article{kernlab:Keerthi:2002, Author = {S. S. Kerthi and E. G. Gilbert}, Title = {Convergence of a Generalized {SMO} Algorithm for {SVM} Classifier Design}, Journal = {Machine Learning}, pages = {351--360}, Year = 2002, volume = 46, url = {http://guppy.mpe.nus.edu.sg/~mpessk/svm/conv_ml.ps.gz}, } @Article{kernlab:Olvi:2000, Author = {Alex J. Smola and Olvi L. Mangasarian and Bernhard Sch\"olkopf}, Title = {Sparse Kernel Feature Analysis}, Journal = {24th Annual Conference of Gesellschaft fr Klassifikation}, publisher = {University of Passau}, Year = 2000, url = {ftp://ftp.cs.wisc.edu/pub/dmi/tech-reports/99-04.ps}, } @Unpublished{kernlab:Lin:2001, Author = {H.-T. Lin and Chih-Jen Lin and R. C. Weng}, Title = {A Note on {Platt's} Probabilistic Outputs for Support Vector Machines}, Year = 2001, note = {Available at \url{http://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.ps}}, } @Unpublished{kernlab:Weng:2004, Author = {C.-J Lin and R C. Weng}, Title = {Probabilistic Predictions for Support Vector Regression}, Year = 2004, note = {Available at \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svrprob.pdf}}, } @Article{kernlab:Crammer:2000, Author = {K. Crammer and Y. 
Singer}, Title = {On the Learnability and Design of Output Codes for Multiclass Prolems}, Year = 2000, Journal = {Computational Learning Theory}, Pages = {35--46}, url = {http://www.cs.huji.ac.il/~kobics/publications/mlj01.ps.gz}, } @Article{kernlab:joachim:1999, Author = {Thorsten Joachims}, Title = {Making Large-scale {SVM} Learning Practical}, Journal = {In Advances in Kernel Methods --- Support Vector Learning}, Chapter = 11, Year = 1999, publisher = {MIT Press}, url = {http://www-ai.cs.uni-dortmund.de/DOKUMENTE/joachims_99a.ps.gz}, } @Article{kernlab:Meyer:2001, author = {David Meyer}, title = {Support Vector Machines}, journal = {R News}, year = 2001, volume = 1, number = 3, pages = {23--26}, month = {September}, url = {http://CRAN.R-project.org/doc/Rnews/}, note = {\url{http://CRAN.R-project.org/doc/Rnews/}} } @ARTICLE{kernlab:meyer+leisch+hornik:2003, AUTHOR = {David Meyer and Friedrich Leisch and Kurt Hornik}, TITLE = {The Support Vector Machine under Test}, JOURNAL = {Neurocomputing}, YEAR = 2003, MONTH = {September}, PAGES = {169--186}, VOLUME = 55, } @Book{kernlab:Vapnik:1998, author = {Vladimir Vapnik}, Title = {Statistical Learning Theory}, Year = 1998, publisher = {Wiley, New York}, } @Book{kernlab:Vapnik2:1995, author = {Vladimir Vapnik}, Title = {The Nature of Statistical Learning Theory}, Year = 1995, publisher = {Springer, NY}, } @Article{kernlab:Wu:2003, Author = {Ting-Fan Wu and Chih-Jen Lin and Ruby C. Weng}, Title = {Probability Estimates for Multi-class Classification by Pairwise Coupling}, Year = 2003, Journal = {Advances in Neural Information Processing}, Publisher = {MIT Press Cambridge Mass.}, Volume = 16, url = {http://books.nips.cc/papers/files/nips16/NIPS2003_0538.pdf}, } @Article{kernlab:Williams:1995, Author = {Christopher K. I. 
Williams and Carl Edward Rasmussen}, Title = {Gaussian Processes for Regression}, Year = 1995, Journal = {Advances in Neural Information Processing}, Publisher = {MIT Press Cambridge Mass.}, Volume = 8, url = {http://books.nips.cc/papers/files/nips08/0514.pdf}, } @Article{kernlab:Schoelkopf:1998, Author = {B. Sch\"olkopf and A. Smola and K. R. M\"uller}, Title = {Nonlinear Component Analysis as a Kernel Eigenvalue Problem}, Journal = {Neural Computation}, Volume = 10, Pages = {1299--1319}, Year = 1998, url = {http://mlg.anu.edu.au/~smola/papers/SchSmoMul98.pdf}, } @Article{kernlab:Tipping:2001, Author = {M. E. Tipping}, Title = {Sparse Bayesian Learning and the Relevance Vector Machine}, Journal = {Journal of Machine Learning Research}, Volume = 1, Year = 2001, Pages = {211--244}, url = {http://www.jmlr.org/papers/volume1/tipping01a/tipping01a.pdf}, } @Article{kernlab:Zhou:2003, Author = {D. Zhou and J. Weston and A. Gretton and O. Bousquet and B. Sch\"olkopf}, Title = {Ranking on Data Manifolds}, Journal = {Advances in Neural Information Processing Systems}, Volume = 16, Year = 2003, Publisher = {MIT Press Cambridge Mass.}, url = {http://www.kyb.mpg.de/publications/pdfs/pdf2334.pdf}, } @Article{kernlab:Andrew:2001, Author = {Andrew Y. Ng and Michael I. Jordan and Yair Weiss}, Title = {On Spectral Clustering: Analysis and an Algorithm}, Journal = {Advances in Neural Information Processing Systems}, Volume = 14, Publisher = {MIT Press Cambridge Mass.}, url = {http://www.nips.cc/NIPS2001/papers/psgz/AA35.ps.gz}, } @Article{kernlab:Caputo:2002, Author = {B. Caputo and K. Sim and F. Furesjo and A. 
Smola}, Title = {Appearance-based Object Recognition using {SVMs}: Which Kernel Should {I} Use?}, Journal = {Proc of NIPS workshop on Statistical methods for computational experiments in visual processing and computer vision, Whistler, 2002}, Year = 2002, } @Article{kernlab:Putten:2000, Author = {Peter van der Putten and Michel de Ruiter and Maarten van Someren}, Title = {CoIL Challenge 2000 Tasks and Results: Predicting and Explaining Caravan Policy Ownership}, Journal = {Coil Challenge 2000}, Year = 2000, url = {http://www.liacs.nl/~putten/library/cc2000/}, } @Article{kernlab:Hsu:2002, Author = {C.-W. Hsu and Chih-Jen Lin}, Title = {A Simple Decomposition Method for Support Vector Machines}, Journal = {Machine Learning}, Year = 2002, Pages = {291--314}, volume = 46, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/decomp.ps.gz}, } @Article{kernlab:Knerr:1990, Author = {S. Knerr and L. Personnaz and G. Dreyfus}, Title = {Single-layer Learning Revisited: A Stepwise Procedure for Building and Training a Neural Network.}, Journal = {J. Fogelman, editor, Neurocomputing: Algorithms, Architectures and Applications}, Publisher = {Springer-Verlag}, Year = 1990, } @Article{kernlab:Kressel:1999, Author = {U. Kre{\ss}el}, Title = {Pairwise Classification and Support Vector Machines}, Year = 1999, Journal = {B. Sch\"olkopf, C. J. C. Burges, A. J. Smola, editors, Advances in Kernel Methods --- Support Vector Learning}, Pages = {255--268}, Publisher = {Cambridge, MA, MIT Press}, } @Article{kernlab:Hsu2:2002, Title = {A Comparison of Methods for Multi-class Support Vector Machines}, Author = {C.-W. Hsu and Chih-Jen Lin}, Journal = {IEEE Transactions on Neural Networks}, Volume = 13, Year = 2002, Pages = {1045--1052}, url = {http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz}, } @Article{kernlab:Tax:1999, Title = {Support Vector Domain Description}, Author = {David M. J. Tax and Robert P. W. 
Duin}, Journal = {Pattern Recognition Letters}, Volume = 20, Pages = {1191--1199}, Year = 1999, Publisher = {Elsevier}, url = {http://www.ph.tn.tudelft.nl/People/bob/papers/prl_99_svdd.pdf}, } @Article{kernlab:Williamson:1999, Title = {Estimating the Support of a High-Dimensonal Distribution}, Author = {B. Sch\"olkopf and J. Platt and J. Shawe-Taylor and A. J. Smola and R. C. Williamson}, Journal = {Microsoft Research, Redmond, WA}, Volume = {TR 87}, Year = 1999, url = {http://research.microsoft.com/research/pubs/view.aspx?msr_tr_id=MSR-TR-99-87}, } @Article{kernlab:Smola1:2000, Title = {New Support Vector Algorithms}, Author = {B. Sch\"olkopf and A. J. Smola and R. C. Williamson and P. L. Bartlett}, Journal = {Neural Computation}, Volume = 12, Year = 2000, Pages = {1207--1245}, url = {http://caliban.ingentaselect.com/vl=3338649/cl=47/nw=1/rpsv/cgi-bin/cgi?body=linker&reqidx=0899-7667(2000)12:5L.1207}, } @Article{kernlab:Wright:1999, Title = {Modified {Cholesky} Factorizations in Interior-point Algorithms for Linear Programming}, Author = {S. Wright}, Journal = {Journal in Optimization}, Volume = 9, publisher = {SIAM}, Year = 1999, Pages = {1159--1191}, ur = {http://www-unix.mcs.anl.gov/~wright/papers/P600.pdf}, } @Article{kernlab:more:1999, Title = {Newton's Method for Large-scale Bound Constrained Problems}, Author = {Chih-Jen Lin and J. J. More}, Journal = {SIAM Journal on Optimization}, volume = 9, pages = {1100--1127}, Year = 1999, } @Article{kernlab:Ng:2001, Title = {On Spectral Clustering: Analysis and an Algorithm}, Author = {Andrew Y. Ng and Michael I. 
Jordan and Yair Weiss}, Journal = {Neural Information Processing Symposium 2001}, Year = 2001, url = {http://www.nips.cc/NIPS2001/papers/psgz/AA35.ps.gz} } @Article{kernlab:kuss:2003, Title = {The Geometry of Kernel Canonical Correlation Analysis}, Author = {Malte Kuss and Thore Graepel}, Journal = {MPI-Technical Reports}, url = {http://www.kyb.mpg.de/publication.html?publ=2233}, Year = 2003, } %% Mathias Seeger gp pub. @Article{kernlab:Kivinen:2004, Title = {Online Learning with Kernels}, Author = {Jyrki Kivinen and Alexander Smola and Robert Williamson}, Journal ={IEEE Transactions on Signal Processing}, volume = 52, Year = 2004, url = {http://mlg.anu.edu.au/~smola/papers/KivSmoWil03.pdf}, } kernlab/vignettes/A.cls0000644000175100001440000001273612055335060014611 0ustar hornikusers\def\fileversion{1.0} \def\filename{A} \def\filedate{2004/10/08} %% %% \NeedsTeXFormat{LaTeX2e} \ProvidesClass{A}[\filedate\space\fileversion\space A class ] %% options \LoadClass[10pt,a4paper,twoside]{article} \newif\if@notitle \@notitlefalse \DeclareOption{notitle}{\@notitletrue} \ProcessOptions %% required packages \RequirePackage{graphicx,a4wide,color,hyperref,ae,fancyvrb,thumbpdf} \RequirePackage[T1]{fontenc} \usepackage[authoryear,round,longnamesfirst]{natbib} \bibpunct{(}{)}{;}{a}{}{,} \bibliographystyle{jss} %% paragraphs \setlength{\parskip}{0.7ex plus0.1ex minus0.1ex} \setlength{\parindent}{0em} %% commands \let\code=\texttt \let\proglang=\textsf \newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} \newcommand{\E}{\mathsf{E}} \newcommand{\VAR}{\mathsf{VAR}} \newcommand{\COV}{\mathsf{COV}} \newcommand{\Prob}{\mathsf{P}} %% for all publications \newcommand{\Plaintitle}[1]{\def\@Plaintitle{#1}} \newcommand{\Shorttitle}[1]{\def\@Shorttitle{#1}} \newcommand{\Plainauthor}[1]{\def\@Plainauthor{#1}} \newcommand{\Keywords}[1]{\def\@Keywords{#1}} \newcommand{\Plainkeywords}[1]{\def\@Plainkeywords{#1}} 
\newcommand{\Abstract}[1]{\def\@Abstract{#1}} %% defaults \author{Firstname Lastname\\Affiliation} \title{Title} \Abstract{---!!!---an abstract is required---!!!---} \Plainauthor{\@author} \Plaintitle{\@title} \Shorttitle{\@title} \Keywords{---!!!---at least one keyword is required---!!!---} \Plainkeywords{\@Keywords} %% Sweave(-like) %\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl} %\DefineVerbatimEnvironment{Soutput}{Verbatim}{} %\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl} %\newenvironment{Schunk}{}{} \DefineVerbatimEnvironment{Code}{Verbatim}{} \DefineVerbatimEnvironment{CodeInput}{Verbatim}{fontshape=sl} \DefineVerbatimEnvironment{CodeOutput}{Verbatim}{} \newenvironment{CodeChunk}{}{} \setkeys{Gin}{width=0.8\textwidth} %% new \maketitle \def\maketitle{ \begingroup \def\thefootnote{\fnsymbol{footnote}} \def\@makefnmark{\hbox to 0pt{$^{\@thefnmark}$\hss}} \long\def\@makefntext##1{\parindent 1em\noindent \hbox to1.8em{\hss $\m@th ^{\@thefnmark}$}##1} \@maketitle \@thanks \endgroup \setcounter{footnote}{0} \thispagestyle{empty} \markboth{\centerline{\@Shorttitle}}{\centerline{\@Plainauthor}} \pagestyle{myheadings} \let\maketitle\relax \let\@maketitle\relax \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax } \def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize {\centering {\LARGE\bf \@title\par} \def\And{\end{tabular}\hfil\linebreak[0]\hfil \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}% \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author\end{tabular}% \vskip 0.3in minus 0.1in \hrule \begin{abstract} \@Abstract \end{abstract}} \textit{Keywords}:~\@Keywords. 
\vskip 0.1in minus 0.05in \hrule \vskip 0.2in minus 0.1in }} %% sections, subsections, and subsubsections \newlength{\preXLskip} \newlength{\preLskip} \newlength{\preMskip} \newlength{\preSskip} \newlength{\postMskip} \newlength{\postSskip} \setlength{\preXLskip}{1.8\baselineskip plus 0.5ex minus 0ex} \setlength{\preLskip}{1.5\baselineskip plus 0.3ex minus 0ex} \setlength{\preMskip}{1\baselineskip plus 0.2ex minus 0ex} \setlength{\preSskip}{.8\baselineskip plus 0.2ex minus 0ex} \setlength{\postMskip}{.5\baselineskip plus 0ex minus 0.1ex} \setlength{\postSskip}{.3\baselineskip plus 0ex minus 0.1ex} \newcommand{\jsssec}[2][default]{\vskip \preXLskip% \pdfbookmark[1]{#1}{Section.\thesection.#1}% \refstepcounter{section}% \centerline{\textbf{\Large \thesection. #2}} \nopagebreak \vskip \postMskip \nopagebreak} \newcommand{\jsssecnn}[1]{\vskip \preXLskip% \centerline{\textbf{\Large #1}} \nopagebreak \vskip \postMskip \nopagebreak} \newcommand{\jsssubsec}[2][default]{\vskip \preMskip% \pdfbookmark[2]{#1}{Subsection.\thesubsection.#1}% \refstepcounter{subsection}% \textbf{\large \thesubsection. 
#2} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsecnn}[1]{\vskip \preMskip% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsubsec}[2][default]{\vskip \preSskip% \pdfbookmark[3]{#1}{Subsubsection.\thesubsubsection.#1}% \refstepcounter{subsubsection}% {\large \textit{#2}} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssubsubsecnn}[1]{\vskip \preSskip% {\textit{\large #1}} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssimplesec}[2][default]{\vskip \preLskip% %% \pdfbookmark[1]{#1}{Section.\thesection.#1}% \refstepcounter{section}% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \newcommand{\jsssimplesecnn}[1]{\vskip \preLskip% \textbf{\large #1} \nopagebreak \vskip \postSskip \nopagebreak} \renewcommand{\section}{\secdef \jsssec \jsssecnn} \renewcommand{\subsection}{\secdef \jsssubsec \jsssubsecnn} \renewcommand{\subsubsection}{\secdef \jsssubsubsec \jsssubsubsecnn} %% colors \definecolor{Red}{rgb}{0.7,0,0} \definecolor{Blue}{rgb}{0,0,0.8} \hypersetup{% hyperindex = {true}, colorlinks = {true}, linktocpage = {true}, plainpages = {false}, linkcolor = {Blue}, citecolor = {Blue}, urlcolor = {Red}, pdfstartview = {Fit}, pdfpagemode = {UseOutlines}, pdfview = {XYZ null null null} } \AtBeginDocument{ \hypersetup{% pdfauthor = {\@Plainauthor}, pdftitle = {\@Plaintitle}, pdfkeywords = {\@Plainkeywords} } } \if@notitle %% \AtBeginDocument{\maketitle} \else \AtBeginDocument{\maketitle} \fi kernlab/vignettes/kernlab.Rnw0000644000175100001440000014230512055335060016030 0ustar hornikusers\documentclass{A} \usepackage{amsfonts,thumbpdf,alltt} \newenvironment{smallverbatim}{\small\verbatim}{\endverbatim} \newenvironment{smallexample}{\begin{alltt}\small}{\end{alltt}} \SweaveOpts{engine=R,eps=FALSE} %\VignetteIndexEntry{kernlab - An S4 Package for Kernel Methods in R} %\VignetteDepends{kernlab} %\VignetteKeywords{kernel methods, support vector machines, quadratic programming, ranking, 
clustering, S4, R} %\VignettePackage{kernlab} <>= library(kernlab) options(width = 70) @ \title{\pkg{kernlab} -- An \proglang{S4} Package for Kernel Methods in \proglang{R}} \Plaintitle{kernlab - An S4 Package for Kernel Methods in R} \author{Alexandros Karatzoglou\\Technische Universit\"at Wien \And Alex Smola\\Australian National University, NICTA \And Kurt Hornik\\Wirtschaftsuniversit\"at Wien } \Plainauthor{Alexandros Karatzoglou, Alex Smola, Kurt Hornik} \Abstract{ \pkg{kernlab} is an extensible package for kernel-based machine learning methods in \proglang{R}. It takes advantage of \proglang{R}'s new \proglang{S4} object model and provides a framework for creating and using kernel-based algorithms. The package contains dot product primitives (kernels), implementations of support vector machines and the relevance vector machine, Gaussian processes, a ranking algorithm, kernel PCA, kernel CCA, kernel feature analysis, online kernel methods and a spectral clustering algorithm. Moreover it provides a general purpose quadratic programming solver, and an incomplete Cholesky decomposition method. } \Keywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, \proglang{S4}, \proglang{R}} \Plainkeywords{kernel methods, support vector machines, quadratic programming, ranking, clustering, S4, R} \begin{document} \section{Introduction} Machine learning is all about extracting structure from data, but it is often difficult to solve problems like classification, regression and clustering in the space in which the underlying observations have been made. Kernel-based learning methods use an implicit mapping of the input data into a high dimensional feature space defined by a kernel function, i.e., a function returning the inner product $ \langle \Phi(x),\Phi(y) \rangle$ between the images of two data points $x, y$ in the feature space. 
The learning then takes place in the feature space, provided the learning algorithm can be entirely rewritten so that the data points only appear inside dot products with other points. This is often referred to as the ``kernel trick'' \citep{kernlab:Schoelkopf+Smola:2002}. More precisely, if a projection $\Phi: X \rightarrow H$ is used, the dot product $\langle\Phi(x),\Phi(y)\rangle$ can be represented by a kernel function~$k$ \begin{equation} \label{eq:kernel} k(x,y)= \langle \Phi(x),\Phi(y) \rangle, \end{equation} which is computationally simpler than explicitly projecting $x$ and $y$ into the feature space~$H$. One interesting property of kernel-based systems is that, once a valid kernel function has been selected, one can practically work in spaces of any dimension without paying any computational cost, since feature mapping is never effectively performed. In fact, one does not even need to know which features are being used. Another advantage is the that one can design and use a kernel for a particular problem that could be applied directly to the data without the need for a feature extraction process. This is particularly important in problems where a lot of structure of the data is lost by the feature extraction process (e.g., text processing). The inherent modularity of kernel-based learning methods allows one to use any valid kernel on a kernel-based algorithm. \subsection{Software review} The most prominent kernel based learning algorithm is without doubt the support vector machine (SVM), so the existence of many support vector machine packages comes as little surprise. 
Most of the existing SVM software is written in \proglang{C} or \proglang{C++}, e.g.\ the award winning \pkg{libsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/libsvm/}} \citep{kernlab:Chang+Lin:2001}, \pkg{SVMlight}\footnote{\url{http://svmlight.joachims.org}} \citep{kernlab:joachim:1999}, \pkg{SVMTorch}\footnote{\url{http://www.torch.ch}}, Royal Holloway Support Vector Machines\footnote{\url{http://svm.dcs.rhbnc.ac.uk}}, \pkg{mySVM}\footnote{\url{http://www-ai.cs.uni-dortmund.de/SOFTWARE/MYSVM/index.eng.html}}, and \pkg{M-SVM}\footnote{\url{http://www.loria.fr/~guermeur/}} with many packages providing interfaces to \proglang{MATLAB} (such as \pkg{libsvm}), and even some native \proglang{MATLAB} toolboxes\footnote{ \url{http://www.isis.ecs.soton.ac.uk/resources/svminfo/}}\,\footnote{ \url{http://asi.insa-rouen.fr/~arakotom/toolbox/index}}\,\footnote{ \url{http://www.cis.tugraz.at/igi/aschwaig/software.html}}. Putting SVM specific software aside and considering the abundance of other kernel-based algorithms published nowadays, there is little software available implementing a wider range of kernel methods with some exceptions like the \pkg{Spider}\footnote{\url{http://www.kyb.tuebingen.mpg.de/bs/people/spider/}} software which provides a \proglang{MATLAB} interface to various \proglang{C}/\proglang{C++} SVM libraries and \proglang{MATLAB} implementations of various kernel-based algorithms, \pkg{Torch} \footnote{\url{http://www.torch.ch}} which also includes more traditional machine learning algorithms, and the occasional \proglang{MATLAB} or \proglang{C} program found on a personal web page where an author includes code from a published paper. \subsection[R software]{\proglang{R} software} The \proglang{R} package \pkg{e1071} offers an interface to the award winning \pkg{libsvm} \citep{kernlab:Chang+Lin:2001}, a very efficient SVM implementation. 
\pkg{libsvm} provides a robust and fast SVM implementation and produces state of the art results on most classification and regression problems \citep{kernlab:Meyer+Leisch+Hornik:2003}. The \proglang{R} interface provided in \pkg{e1071} adds all standard \proglang{R} functionality like object orientation and formula interfaces to \pkg{libsvm}. Another SVM related \proglang{R} package which was made recently available is \pkg{klaR} \citep{kernlab:Roever:2004} which includes an interface to \pkg{SVMlight}, a popular SVM implementation along with other classification tools like Regularized Discriminant Analysis. However, most of the \pkg{libsvm} and \pkg{klaR} SVM code is in \proglang{C++}. Therefore, if one would like to extend or enhance the code with e.g.\ new kernels or different optimizers, one would have to modify the core \proglang{C++} code. \section[kernlab]{\pkg{kernlab}} \pkg{kernlab} aims to provide the \proglang{R} user with basic kernel functionality (e.g., like computing a kernel matrix using a particular kernel), along with some utility functions commonly used in kernel-based methods like a quadratic programming solver, and modern kernel-based algorithms based on the functionality that the package provides. Taking advantage of the inherent modularity of kernel-based methods, \pkg{kernlab} aims to allow the user to switch between kernels on an existing algorithm and even create and use own kernel functions for the kernel methods provided in the package. \subsection[S4 objects]{\proglang{S4} objects} \pkg{kernlab} uses \proglang{R}'s new object model described in ``Programming with Data'' \citep{kernlab:Chambers:1998} which is known as the \proglang{S4} class system and is implemented in the \pkg{methods} package. In contrast with the older \proglang{S3} model for objects in \proglang{R}, classes, slots, and methods relationships must be declared explicitly when using the \proglang{S4} system. 
The number and types of slots in an instance of a class have to be established at the time the class is defined. The objects from the class are validated against this definition and have to comply to it at any time. \proglang{S4} also requires formal declarations of methods, unlike the informal system of using function names to identify a certain method in \proglang{S3}. An \proglang{S4} method is declared by a call to \code{setMethod} along with the name and a ``signature'' of the arguments. The signature is used to identify the classes of one or more arguments of the method. Generic functions can be declared using the \code{setGeneric} function. Although such formal declarations require package authors to be more disciplined than when using the informal \proglang{S3} classes, they provide assurance that each object in a class has the required slots and that the names and classes of data in the slots are consistent. An example of a class used in \pkg{kernlab} is shown below. Typically, in a return object we want to include information on the result of the method along with additional information and parameters. Usually \pkg{kernlab}'s classes include slots for the kernel function used and the results and additional useful information. \begin{smallexample} setClass("specc", representation("vector", # the vector containing the cluster centers="matrix", # the cluster centers size="vector", # size of each cluster kernelf="function", # kernel function used withinss = "vector"), # within cluster sum of squares prototype = structure(.Data = vector(), centers = matrix(), size = matrix(), kernelf = ls, withinss = vector())) \end{smallexample} Accessor and assignment function are defined and used to access the content of each slot which can be also accessed with the \verb|@| operator. \subsection{Namespace} Namespaces were introduced in \proglang{R} 1.7.0 and provide a means for packages to control the way global variables and methods are being made available. 
Due to the number of assignment and accessor function involved, a namespace is used to control the methods which are being made visible outside the package. Since \proglang{S4} methods are being used, the \pkg{kernlab} namespace also imports methods and variables from the \pkg{methods} package. \subsection{Data} The \pkg{kernlab} package also includes data set which will be used to illustrate the methods included in the package. The \code{spam} data set \citep{kernlab:Hastie:2001} set collected at Hewlett-Packard Labs contains data on 2788 and 1813 e-mails classified as non-spam and spam, respectively. The 57 variables of each data vector indicate the frequency of certain words and characters in the e-mail. Another data set included in \pkg{kernlab}, the \code{income} data set \citep{kernlab:Hastie:2001}, is taken by a marketing survey in the San Francisco Bay concerning the income of shopping mall customers. It consists of 14 demographic attributes (nominal and ordinal variables) including the income and 8993 observations. The \code{ticdata} data set \citep{kernlab:Putten:2000} was used in the 2000 Coil Challenge and contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why? The \code{promotergene} is a data set of E. Coli promoter gene sequences (DNA) with 106 observations and 58 variables available at the UCI Machine Learning repository. Promoters have a region where a protein (RNA polymerase) must make contact and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters. 
The \code{spirals} data set was created by the \code{mlbench.spirals} function in the \pkg{mlbench} package \citep{kernlab:Leisch+Dimitriadou}. This two-dimensional data set with 300 data points consists of two spirals where Gaussian noise is added to each data point. \subsection{Kernels} A kernel function~$k$ calculates the inner product of two vectors $x$, $x'$ in a given feature mapping $\Phi: X \rightarrow H$. The notion of a kernel is obviously central in the making of any kernel-based algorithm and consequently also in any software package containing kernel-based methods. Kernels in \pkg{kernlab} are \proglang{S4} objects of class \code{kernel} extending the \code{function} class with one additional slot containing a list with the kernel hyper-parameters. Package \pkg{kernlab} includes 7 different kernel classes which all contain the class \code{kernel} and are used to implement the existing kernels. These classes are used in the function dispatch mechanism of the kernel utility functions described below. Existing kernel functions are initialized by ``creator'' functions. All kernel functions take two feature vectors as parameters and return the scalar dot product of the vectors. An example of the functionality of a kernel in \pkg{kernlab}: <>= ## create a RBF kernel function with sigma hyper-parameter 0.05 rbf <- rbfdot(sigma = 0.05) rbf ## create two random feature vectors x <- rnorm(10) y <- rnorm(10) ## compute dot product between x,y rbf(x, y) @ The package includes implementations of the following kernels: \begin{itemize} \item the linear \code{vanilladot} kernel implements the simplest of all kernel functions \begin{equation} k(x,x') = \langle x, x' \rangle \end{equation} which is useful specially when dealing with large sparse data vectors~$x$ as is usually the case in text categorization. 
\item the Gaussian radial basis function \code{rbfdot} \begin{equation} k(x,x') = \exp(-\sigma \|x - x'\|^2) \end{equation} which is a general purpose kernel and is typically used when no further prior knowledge is available about the data. \item the polynomial kernel \code{polydot} \begin{equation} k(x, x') = \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right)^\mathrm{degree}. \end{equation} which is used in classification of images. \item the hyperbolic tangent kernel \code{tanhdot} \begin{equation} k(x, x') = \tanh \left( \mathrm{scale} \cdot \langle x, x' \rangle + \mathrm{offset} \right) \end{equation} which is mainly used as a proxy for neural networks. \item the Bessel function of the first kind kernel \code{besseldot} \begin{equation} k(x, x') = \frac{\mathrm{Bessel}_{(\nu+1)}^n(\sigma \|x - x'\|)} {(\|x-x'\|)^{-n(\nu+1)}}. \end{equation} is a general purpose kernel and is typically used when no further prior knowledge is available and mainly popular in the Gaussian process community. \item the Laplace radial basis kernel \code{laplacedot} \begin{equation} k(x, x') = \exp(-\sigma \|x - x'\|) \end{equation} which is a general purpose kernel and is typically used when no further prior knowledge is available. \item the ANOVA radial basis kernel \code{anovadot} performs well in multidimensional regression problems \begin{equation} k(x, x') = \left(\sum_{k=1}^{n}\exp(-\sigma(x^k-{x'}^k)^2)\right)^{d} \end{equation} where $x^k$ is the $k$th component of $x$. \end{itemize} \subsection{Kernel utility methods} The package also includes methods for computing commonly used kernel expressions (e.g., the Gram matrix). These methods are written in such a way that they take functions (i.e., kernels) and matrices (i.e., vectors of patterns) as arguments. These can be either the kernel functions already included in \pkg{kernlab} or any other function implementing a valid dot product (taking two vector arguments and returning a scalar). 
In case one of the already implemented kernels is used, the function calls a vectorized implementation of the corresponding function. Moreover, in the case of symmetric matrices (e.g., the dot product matrix of a Support Vector Machine) they only require one argument rather than having to pass the same matrix twice (for rows and columns). The computations for the kernels already available in the package are vectorized whenever possible which guarantees good performance and acceptable memory requirements. Users can define their own kernel by creating a function which takes two vectors as arguments (the data points) and returns a scalar (the dot product). This function can then be based as an argument to the kernel utility methods. For a user defined kernel the dispatch mechanism calls a generic method implementation which calculates the expression by passing the kernel function through a pair of \code{for} loops. The kernel methods included are: \begin{description} \item[\code{kernelMatrix}] This is the most commonly used function. It computes $k(x, x')$, i.e., it computes the matrix $K$ where $K_{ij} = k(x_i, x_j)$ and $x$ is a \emph{row} vector. In particular, \begin{verbatim} K <- kernelMatrix(kernel, x) \end{verbatim} computes the matrix $K_{ij} = k(x_i, x_j)$ where the $x_i$ are the columns of $X$ and \begin{verbatim} K <- kernelMatrix(kernel, x1, x2) \end{verbatim} computes the matrix $K_{ij} = k(x1_i, x2_j)$. \item[\code{kernelFast}] This method is different to \code{kernelMatrix} for \code{rbfdot}, \code{besseldot}, and the \code{laplacedot} kernel, which are all RBF kernels. It is identical to \code{kernelMatrix}, except that it also requires the squared norm of the first argument as additional input. It is mainly used in kernel algorithms, where columns of the kernel matrix are computed per invocation. 
In these cases, evaluating the norm of each column-entry as it is done on a \code{kernelMatrix} invocation on an RBF kernel, over and over again would cause significant computational overhead. Its invocation is via \begin{verbatim} K = kernelFast(kernel, x1, x2, a) \end{verbatim} Here $a$ is a vector containing the squared norms of $x1$. \item[\code{kernelMult}] is a convenient way of computing kernel expansions. It returns the vector $f = (f(x_1), \dots, f(x_m))$ where \begin{equation} f(x_i) = \sum_{j=1}^{m} k(x_i, x_j) \alpha_j, \mbox{~hence~} f = K \alpha. \end{equation} The need for such a function arises from the fact that $K$ may sometimes be larger than the memory available. Therefore, it is convenient to compute $K$ only in stripes and discard the latter after the corresponding part of $K \alpha$ has been computed. The parameter \code{blocksize} determines the number of rows in the stripes. In particular, \begin{verbatim} f <- kernelMult(kernel, x, alpha) \end{verbatim} computes $f_i = \sum_{j=1}^m k(x_i, x_j) \alpha_j$ and \begin{verbatim} f <- kernelMult(kernel, x1, x2, alpha) \end{verbatim} computes $f_i = \sum_{j=1}^m k(x1_i, x2_j) \alpha_j$. \item[\code{kernelPol}] is a method very similar to \code{kernelMatrix} with the only difference that rather than computing $K_{ij} = k(x_i, x_j)$ it computes $K_{ij} = y_i y_j k(x_i, x_j)$. This means that \begin{verbatim} K <- kernelPol(kernel, x, y) \end{verbatim} computes the matrix $K_{ij} = y_i y_j k(x_i, x_j)$ where the $x_i$ are the columns of $x$ and $y_i$ are elements of the vector~$y$. Moreover, \begin{verbatim} K <- kernelPol(kernel, x1, x2, y1, y2) \end{verbatim} computes the matrix $K_{ij} = y1_i y2_j k(x1_i, x2_j)$. Both \code{x1} and \code{x2} may be matrices and \code{y1} and \code{y2} vectors. 
\end{description} An example using these functions : <>= ## create a RBF kernel function with sigma hyper-parameter 0.05 poly <- polydot(degree=2) ## create artificial data set x <- matrix(rnorm(60), 6, 10) y <- matrix(rnorm(40), 4, 10) ## compute kernel matrix kx <- kernelMatrix(poly, x) kxy <- kernelMatrix(poly, x, y) @ \section{Kernel methods} Providing a solid base for creating kernel-based methods is part of what we are trying to achieve with this package, the other being to provide a wider range of kernel-based methods in \proglang{R}. In the rest of the paper we present the kernel-based methods available in \pkg{kernlab}. All the methods in \pkg{kernlab} can be used with any of the kernels included in the package as well as with any valid user-defined kernel. User defined kernel functions can be passed to existing kernel-methods in the \code{kernel} argument. \subsection{Support vector machine} Support vector machines \citep{kernlab:Vapnik:1998} have gained prominence in the field of machine learning and pattern classification and regression. The solutions to classification and regression problems sought by kernel-based algorithms such as the SVM are linear functions in the feature space: \begin{equation} f(x) = w^\top \Phi(x) \end{equation} for some weight vector $w \in F$. The kernel trick can be exploited in this whenever the weight vector~$w$ can be expressed as a linear combination of the training points, $w = \sum_{i=1}^{n} \alpha_i \Phi(x_i)$, implying that $f$ can be written as \begin{equation} f(x) = \sum_{i=1}^{n}\alpha_i k(x_i, x) \end{equation} A very important issue that arises is that of choosing a kernel~$k$ for a given learning task. Intuitively, we wish to choose a kernel that induces the ``right'' metric in the space. Support Vector Machines choose a function $f$ that is linear in the feature space by optimizing some criterion over the sample. 
In the case of the 2-norm Soft Margin classification the optimization problem takes the form: \begin{eqnarray} \nonumber \mathrm{minimize} && t(w,\xi) = \frac{1}{2}{\|w\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\ \mbox{subject to~} && y_i ( \langle x_i , w \rangle +b ) \geq 1- \xi_i \qquad (i=1,\dots,m)\\ \nonumber && \xi_i \ge 0 \qquad (i=1,\dots, m) \end{eqnarray} Based on similar methodology, SVMs deal with the problem of novelty detection (or one class classification) and regression. \pkg{kernlab}'s implementation of support vector machines, \code{ksvm}, is based on the optimizers found in \pkg{bsvm}\footnote{\url{http://www.csie.ntu.edu.tw/~cjlin/bsvm}} \citep{kernlab:Hsu:2002} and \pkg{libsvm} \citep{kernlab:Chang+Lin:2001} which includes a very efficient version of the Sequential Minimal Optimization (SMO) algorithm. SMO decomposes the SVM Quadratic Problem (QP) without using any numerical QP optimization steps. Instead, it chooses to solve the smallest possible optimization problem involving two elements of $\alpha_i$ because they must obey one linear equality constraint. At every step, SMO chooses two $\alpha_i$ to jointly optimize and finds the optimal values for these $\alpha_i$ analytically, thus avoiding numerical QP optimization, and updates the SVM to reflect the new optimal values. The SVM implementations available in \code{ksvm} include the C-SVM classification algorithm along with the $\nu$-SVM classification formulation which is equivalent to the former but has a more natural ($\nu$) model parameter taking values in $[0,1]$ and is proportional to the fraction of support vectors found in the data set and the training error. For classification problems which include more than two classes (multi-class) a one-against-one or pairwise classification method \citep{kernlab:Knerr:1990, kernlab:Kressel:1999} is used. This method constructs ${k \choose 2}$ classifiers where each one is trained on data from two classes.
Prediction is done by voting where each classifier gives a prediction and the class which is predicted more often wins (``Max Wins''). This method has been shown to produce robust results when used with SVMs \citep{kernlab:Hsu2:2002}. Furthermore the \code{ksvm} implementation provides the ability to produce class probabilities as output instead of class labels. This is done by an improved implementation \citep{kernlab:Lin:2001} of Platt's posteriori probabilities \citep{kernlab:Platt:2000} where a sigmoid function \begin{equation} P(y=1\mid f) = \frac{1}{1+ e^{Af+B}} \end{equation} is fitted on the decision values~$f$ of the binary SVM classifiers, $A$ and $B$ are estimated by minimizing the negative log-likelihood function. To extend the class probabilities to the multi-class case, each binary classifier's class probability output is combined by the \code{couple} method which implements methods for combining class probabilities proposed in \citep{kernlab:Wu:2003}. In order to create a similar probability output for regression, following \cite{kernlab:Weng:2004}, we suppose that the SVM is trained on data from the model \begin{equation} y_i = f(x_i) + \delta_i \end{equation} where $f(x_i)$ is the underlying function and $\delta_i$ is independent and identically distributed random noise. Given a test data point $x$ the distribution of $y$ given $x$ allows one to draw probabilistic inferences about $y$ e.g. one can construct a predictive interval $\Phi = \Phi(x)$ such that $y \in \Phi$ with a certain probability. If $\hat{f}$ is the estimated (predicted) function of the SVM on new data then $\eta = \eta(x) = y - \hat{f}(x)$ is the prediction error and $y \in \Phi$ is equivalent to $\eta \in \Phi $. Empirical observation shows that the distribution of the residuals $\eta$ can be modeled both by a Gaussian and a Laplacian distribution with zero mean. In this implementation the Laplacian with zero mean is used : \begin{equation} p(z) = \frac{1}{2\sigma}e^{-\frac{|z|}{\sigma}} \end{equation} Assuming that $\eta$ are independent the scale parameter $\sigma$ is estimated by maximizing the likelihood. The data for the estimation is produced by a three-fold cross-validation. For the Laplace distribution the maximum likelihood estimate is : \begin{equation} \sigma = \frac{\sum_{i=1}^m|\eta_i|}{m} \end{equation} Another approach for multi-class classification supported by the \code{ksvm} function is the one proposed in \cite{kernlab:Crammer:2000}. This algorithm works by solving a single optimization problem including the data from all classes: \begin{eqnarray} \nonumber \mathrm{minimize} && t(w_n,\xi) = \frac{1}{2}\sum_{n=1}^k{\|w_n\|}^2+\frac{C}{m}\sum_{i=1}^{m}\xi_i \\ \mbox{subject to~} && \langle x_i , w_{y_i} \rangle - \langle x_i , w_{n} \rangle \geq b_i^n - \xi_i \qquad (i=1,\dots,m) \\ \mbox{where} && b_i^n = 1 - \delta_{y_i,n} \end{eqnarray} where the decision function is \begin{equation} \mathrm{argmax}_{m=1,\dots,k} \langle x_i , w_{n} \rangle \end{equation} This optimization problem is solved by a decomposition method proposed in \cite{kernlab:Hsu:2002} where optimal working sets are found (that is, sets of $\alpha_i$ values which have a high probability of being non-zero). The QP sub-problems are then solved by a modified version of the \pkg{TRON}\footnote{\url{http://www-unix.mcs.anl.gov/~more/tron/}} \citep{kernlab:more:1999} optimization software. One-class classification or novelty detection \citep{kernlab:Williamson:1999, kernlab:Tax:1999}, where essentially an SVM detects outliers in a data set, is another algorithm supported by \code{ksvm}. SVM novelty detection works by creating a spherical decision boundary around a set of data points by a set of support vectors describing the sphere's boundary. The $\nu$ parameter is used to control the volume of the sphere and consequently the number of outliers found.
Again, the value of $\nu$ represents the fraction of outliers found. Furthermore, $\epsilon$-SVM \citep{kernlab:Vapnik2:1995} and $\nu$-SVM \citep{kernlab:Smola1:2000} regression are also available. The problem of model selection is partially addressed by an empirical observation for the popular Gaussian RBF kernel \citep{kernlab:Caputo:2002}, where the optimal values of the hyper-parameter of sigma are shown to lie in between the 0.1 and 0.9 quantile of the $\|x- x'\| $ statistics. The \code{sigest} function uses a sample of the training set to estimate the quantiles and returns a vector containing the values of the quantiles. Pretty much any value within this interval leads to good performance. An example for the \code{ksvm} function is shown below. <>= ## simple example using the promotergene data set data(promotergene) ## create test and training set tindex <- sample(1:dim(promotergene)[1],5) genetrain <- promotergene[-tindex, ] genetest <- promotergene[tindex,] ## train a support vector machine gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot",kpar="automatic",C=60,cross=3,prob.model=TRUE) gene predict(gene, genetest) predict(gene, genetest, type="probabilities") @ \begin{figure} \centering <>= set.seed(123) x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) @ \caption{A contour plot of the SVM decision values for a toy binary classification problem using the \code{plot} function} \label{fig:ksvm Plot} \end{figure} \subsection{Relevance vector machine} The relevance vector machine \citep{kernlab:Tipping:2001} is a probabilistic sparse kernel model identical in functional form to the SVM making predictions based on a function of the form \begin{equation} y(x) = \sum_{n=1}^{N} \alpha_n K(\mathbf{x},\mathbf{x}_n) + a_0 \end{equation} where $\alpha_n$ are the model ``weights'' and $K(\cdotp,\cdotp)$ is a kernel function. 
It adopts a Bayesian approach to learning, by introducing a prior over the weights $\alpha$ \begin{equation} p(\alpha, \beta) = \prod_{i=1}^m N(\beta_i \mid 0 , a_i^{-1}) \mathrm{Gamma}(\beta_i\mid \beta_\beta , \alpha_\beta) \end{equation} governed by a set of hyper-parameters $\beta$, one associated with each weight, whose most probable values are iteratively estimated for the data. Sparsity is achieved because in practice the posterior distribution in many of the weights is sharply peaked around zero. Furthermore, unlike the SVM classifier, the non-zero weights in the RVM are not associated with examples close to the decision boundary, but rather appear to represent ``prototypical'' examples. These examples are termed \emph{relevance vectors}. \pkg{kernlab} currently has an implementation of the RVM based on a type~II maximum likelihood method which can be used for regression. The functions returns an \proglang{S4} object containing the model parameters along with indexes for the relevance vectors and the kernel function and hyper-parameters used. <>= x <- seq(-20, 20, 0.5) y <- sin(x)/x + rnorm(81, sd = 0.03) y[41] <- 1 @ <>= rvmm <- rvm(x, y,kernel="rbfdot",kpar=list(sigma=0.1)) rvmm ytest <- predict(rvmm, x) @ \begin{figure} \centering <>= plot(x, y, cex=0.5) lines(x, ytest, col = "red") points(x[RVindex(rvmm)],y[RVindex(rvmm)],pch=21) @ \caption{Relevance vector regression on data points created by the $sinc(x)$ function, relevance vectors are shown circled.} \label{fig:RVM sigmoid} \end{figure} \subsection{Gaussian processes} Gaussian processes \citep{kernlab:Williams:1995} are based on the ``prior'' assumption that adjacent observations should convey information about each other. In particular, it is assumed that the observed variables are normal, and that the coupling between them takes place by means of the covariance matrix of a normal distribution. 
Using the kernel matrix as the covariance matrix is a convenient way of extending Bayesian modeling of linear estimators to nonlinear situations. Furthermore it represents the counterpart of the ``kernel trick'' in methods minimizing the regularized risk. For regression estimation we assume that rather than observing $t(x_i)$ we observe $y_i = t(x_i) + \xi_i$ where $\xi_i$ is assumed to be independent Gaussian distributed noise with zero mean. The posterior distribution is given by \begin{equation} p(\mathbf{y}\mid \mathbf{t}) = \left[ \prod_ip(y_i - t(x_i)) \right] \frac{1}{\sqrt{(2\pi)^m \det(K)}} \exp \left(-\frac{1}{2}\mathbf{t}^T K^{-1} \mathbf{t} \right) \end{equation} and after substituting $\mathbf{t} = K\mathbf{\alpha}$ and taking logarithms \begin{equation} \ln{p(\mathbf{\alpha} \mid \mathbf{y})} = - \frac{1}{2\sigma^2}\| \mathbf{y} - K \mathbf{\alpha} \|^2 -\frac{1}{2}\mathbf{\alpha}^T K \mathbf{\alpha} +c \end{equation} and maximizing $\ln{p(\mathbf{\alpha} \mid \mathbf{y})}$ for $\mathbf{\alpha}$ to obtain the maximum a posteriori approximation yields \begin{equation} \mathbf{\alpha} = (K + \sigma^2\mathbf{1})^{-1} \mathbf{y} \end{equation} Knowing $\mathbf{\alpha}$ allows for prediction of $y$ at a new location $x$ through $y = K(x,x_i){\mathbf{\alpha}}$. In similar fashion Gaussian processes can be used for classification. \code{gausspr} is the function in \pkg{kernlab} implementing Gaussian processes for classification and regression. \subsection{Ranking} The success of Google has vividly demonstrated the value of a good ranking algorithm in real world problems. \pkg{kernlab} includes a ranking algorithm based on work published in \citep{kernlab:Zhou:2003}. This algorithm exploits the geometric structure of the data in contrast to the more naive approach which uses the Euclidean distances or inner products of the data.
Since real world data are usually highly structured, this algorithm should perform better than a simpler approach based on a Euclidean distance measure. First, a weighted network is defined on the data and an authoritative score is assigned to every point. The query points act as source nodes that continually pump their scores to the remaining points via the weighted network, and the remaining points further spread the score to their neighbors. The spreading process is repeated until convergence and the points are ranked according to the scores they received. Suppose we are given a set of data points $X = {x_1, \dots, x_{s}, x_{s+1}, \dots, x_{m}}$ in $\mathbf{R}^n$ where the first $s$ points are the query points and the rest are the points to be ranked. The algorithm works by connecting the two nearest points iteratively until a connected graph $G = (X, E)$ is obtained where $E$ is the set of edges. The affinity matrix $K$ defined e.g.\ by $K_{ij} = \exp(-\sigma\|x_i - x_j \|^2)$ if there is an edge $e(i,j) \in E$ and $0$ for the rest and diagonal elements. The matrix is normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$, and \begin{equation} f(t+1) = \alpha Lf(t) + (1 - \alpha)y \end{equation} is iterated until convergence, where $\alpha$ is a parameter in $[0,1)$. The points are then ranked according to their final scores $f_{i}(t_f)$. \pkg{kernlab} includes an \proglang{S4} method implementing the ranking algorithm. The algorithm can be used both with an edge-graph where the structure of the data is taken into account, and without which is equivalent to ranking the data by their distance in the projected space. 
\begin{figure} \centering <>= data(spirals) ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,] ranked <- ranking(ran, 54, kernel = "rbfdot", kpar = list(sigma = 100), edgegraph = TRUE) ranked[54, 2] <- max(ranked[-54, 2]) c<-1:86 op <- par(mfrow = c(1, 2),pty="s") plot(ran) plot(ran, cex=c[ranked[,3]]/40) @ \caption{The points on the left are ranked according to their similarity to the upper most left point. Points with a higher rank appear bigger. Instead of ranking the points on simple Euclidean distance the structure of the data is recognized and all points on the upper structure are given a higher rank although further away in distance than points in the lower structure.} \label{fig:Ranking} \end{figure} \subsection{Online learning with kernels} The \code{onlearn} function in \pkg{kernlab} implements the online kernel algorithms for classification, novelty detection and regression described in \citep{kernlab:Kivinen:2004}. In batch learning, it is typically assumed that all the examples are immediately available and are drawn independently from some distribution $P$. One natural measure of quality for some $f$ in that case is the expected risk \begin{equation} R[f,P] := E_{(x,y)\sim P}[l(f(x),y)] \end{equation} Since usually $P$ is unknown a standard approach is to instead minimize the empirical risk \begin{equation} R_{emp}[f,P] := \frac{1}{m}\sum_{t=1}^m l(f(x_t),y_t) \end{equation} Minimizing $R_{emp}[f]$ may lead to overfitting (complex functions that fit well on the training data but do not generalize to unseen data). One way to avoid this is to penalize complex functions by instead minimizing the regularized risk. \begin{equation} R_{reg}[f,S] := R_{reg,\lambda}[f,S] := R_{emp}[f] + \frac{\lambda}{2}\|f\|_{H}^2 \end{equation} where $\lambda > 0$ and $\|f\|_{H} = {\langle f,f \rangle}_{H}^{\frac{1}{2}}$ does indeed measure the complexity of $f$ in a sensible way. The constant $\lambda$ needs to be chosen appropriately for each problem.
Since in online learning one is interested in dealing with one example at a time the definition of an instantaneous regularized risk on a single example is needed \begin{equation} R_{inst}[f,x,y] := R_{inst,\lambda}[f,x,y] := R_{reg,\lambda}[f,((x,y))] \end{equation} The implemented algorithms are classical stochastic gradient descent algorithms performing gradient descent on the instantaneous risk. The general form of the update rule is : \begin{equation} f_{t+1} = f_t - \eta \partial_f R_{inst,\lambda}[f,x_t,y_t]|_{f=f_t} \end{equation} where $f_i \in H$ and $\partial_f$ is short hand for $\partial / \partial f$ (the gradient with respect to $f$) and $\eta_t > 0$ is the learning rate. Due to the learning taking place in a \textit{reproducing kernel Hilbert space} $H$ the kernel $k$ used has the property $\langle f,k(x,\cdotp)\rangle_H = f(x)$ and therefore \begin{equation} \partial_f l(f(x_t),y_t) = l'(f(x_t),y_t)k(x_t,\cdotp) \end{equation} where $l'(z,y) := \partial_z l(z,y)$. Since $\partial_f\|f\|_H^2 = 2f$ the update becomes \begin{equation} f_{t+1} := (1 - \eta_t\lambda)f_t -\eta_t l'( f_t(x_t),y_t)k(x_t,\cdotp) \end{equation} The \code{onlearn} function implements the online learning algorithm for regression, classification and novelty detection. The online nature of the algorithm requires a different approach to the use of the function. An object is used to store the state of the algorithm at each iteration $t$; this object is passed to the function as an argument and is returned at each iteration $t+1$ containing the model parameter state at this step. An empty object of class \code{onlearn} is initialized using the \code{inlearn} function.
<>= ## create toy data set x <- rbind(matrix(rnorm(90),,2),matrix(rnorm(90)+3,,2)) y <- matrix(c(rep(1,45),rep(-1,45)),,1) ## initialize onlearn object on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2),type="classification") ind <- sample(1:90,90) ## learn one data point at the time for(i in ind) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) sign(predict(on,x)) @ \subsection{Spectral clustering} Spectral clustering \citep{kernlab:Ng:2001} is a recently emerged promising alternative to common clustering algorithms. In this method one uses the top eigenvectors of a matrix created by some similarity measure to cluster the data. Similarly to the ranking algorithm, an affinity matrix is created out from the data as \begin{equation} K_{ij}=\exp(-\sigma\|x_i - x_j \|^2) \end{equation} and normalized as $L = D^{-1/2}KD^{-1/2}$ where $D_{ii} = \sum_{j=1}^m K_{ij}$. Then the top $k$ eigenvectors (where $k$ is the number of clusters to be found) of the affinity matrix are used to form an $n \times k$ matrix $Y$ where each column is normalized again to unit length. Treating each row of this matrix as a data point, \code{kmeans} is finally used to cluster the points. \pkg{kernlab} includes an \proglang{S4} method called \code{specc} implementing this algorithm which can be used through an formula interface or a matrix interface. The \proglang{S4} object returned by the method extends the class ``vector'' and contains the assigned cluster for each point along with information on the centers size and within-cluster sum of squares for each cluster. In case a Gaussian RBF kernel is being used a model selection process can be used to determine the optimal value of the $\sigma$ hyper-parameter. For a good value of $\sigma$ the values of $Y$ tend to cluster tightly and it turns out that the within cluster sum of squares is a good indicator for the ``quality'' of the sigma parameter found. We then iterate through the sigma values to find an optimal value for $\sigma$. 
\begin{figure} \centering <>= data(spirals) sc <- specc(spirals, centers=2) plot(spirals, pch=(23 - 2*sc)) @ \caption{Clustering the two spirals data set with \code{specc}} \label{fig:Spectral Clustering} \end{figure} \subsection{Kernel principal components analysis} Principal component analysis (PCA) is a powerful technique for extracting structure from possibly high-dimensional datasets. PCA is an orthogonal transformation of the coordinate system in which we describe the data. The new coordinates by which we represent the data are called principal components. Kernel PCA \citep{kernlab:Schoelkopf:1998} performs a nonlinear transformation of the coordinate system by finding principal components which are nonlinearly related to the input variables. Given a set of centered observations $x_k$, $k=1,\dots,M$, $x_k \in \mathbf{R}^N$, PCA diagonalizes the covariance matrix $C = \frac{1}{M}\sum_{j=1}^Mx_jx_{j}^T$ by solving the eigenvalue problem $\lambda\mathbf{v}=C\mathbf{v}$. The same computation can be done in a dot product space $F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$. Assuming that we deal with centered data and use the covariance matrix in $F$, \begin{equation} \hat{C}=\frac{1}{C}\sum_{j=1}^N \Phi(x_j)\Phi(x_j)^T \end{equation} the kernel principal components are then computed by taking the eigenvectors of the centered kernel matrix $K_{ij} = \langle \Phi(x_j),\Phi(x_j) \rangle$. \code{kpca}, the the function implementing KPCA in \pkg{kernlab}, can be used both with a formula and a matrix interface, and returns an \proglang{S4} object of class \code{kpca} containing the principal components the corresponding eigenvalues along with the projection of the training data on the new coordinate system. Furthermore, the \code{predict} function can be used to embed new data points into the new coordinate system. 
\begin{figure} \centering <>= data(spam) train <- sample(1:dim(spam)[1],400) kpc <- kpca(~.,data=spam[train,-58],kernel="rbfdot",kpar=list(sigma=0.001),features=2) kpcv <- pcv(kpc) plot(rotated(kpc),col=as.integer(spam[train,58]),xlab="1st Principal Component",ylab="2nd Principal Component") @ \caption{Projection of the spam data on two kernel principal components using an RBF kernel} \label{fig:KPCA} \end{figure} \subsection{Kernel feature analysis} Whilst KPCA leads to very good results there are nevertheless some issues to be addressed. First the computational complexity of the standard version of KPCA, the algorithm scales $O(m^3)$ and secondly the resulting feature extractors are given as a dense expansion in terms of the of the training patterns. Sparse solutions are often achieved in supervised learning settings by using an $l_1$ penalty on the expansion coefficients. An algorithm can be derived using the same approach in feature extraction requiring only $n$ basis functions to compute the first $n$ feature. Kernel feature analysis \citep{kernlab:Olvi:2000} is computationally simple and scales approximately one order of magnitude better on large data sets than standard KPCA. Choosing $\Omega [f] = \sum_{i=1}^m |\alpha_i |$ this yields \begin{equation} F_{LP} = \{ \mathbf{w} \vert \mathbf{w} = \sum_{i=1}^m \alpha_i \Phi(x_i) \mathrm{with} \sum_{i=1}^m |\alpha_i | \leq 1 \} \end{equation} This setting leads to the first ``principal vector'' in the $l_1$ context \begin{equation} \mathbf{\nu}^1 = \mathrm{argmax}_{\mathbf{\nu} \in F_{LP}} \frac{1}{m} \sum_{i=1}^m \langle \mathbf{\nu},\mathbf{\Phi}(x_i) - \frac{1}{m}\sum_{j=1}^m\mathbf{\Phi}(x_i) \rangle^2 \end{equation} Subsequent ``principal vectors'' can be defined by enforcing optimality with respect to the remaining orthogonal subspaces. Due to the $l_1$ constrain the solution has the favorable property of being sparse in terms of the coefficients $\alpha_i$. 
The function \code{kfa} in \pkg{kernlab} implements Kernel Feature Analysis by using a projection pursuit technique on a sample of the data. Results are then returned in an \proglang{S4} object. \begin{figure} \centering <>= data(promotergene) f <- kfa(~.,data=promotergene,features=2,kernel="rbfdot",kpar=list(sigma=0.013)) plot(predict(f,promotergene),col=as.numeric(promotergene[,1]),xlab="1st Feature",ylab="2nd Feature") @ \caption{Projection of the spam data on two features using an RBF kernel} \label{fig:KFA} \end{figure} \subsection{Kernel canonical correlation analysis} Canonical correlation analysis (CCA) is concerned with describing the linear relations between variables. If we have two data sets $x_1$ and $x_2$, then the classical CCA attempts to find linear combination of the variables which give the maximum correlation between the combinations. I.e., if \begin{eqnarray*} && y_1 = \mathbf{w_1}\mathbf{x_1} = \sum_j w_1 x_{1j} \\ && y_2 = \mathbf{w_2}\mathbf{x_2} = \sum_j w_2 x_{2j} \end{eqnarray*} one wishes to find those values of $\mathbf{w_1}$ and $\mathbf{w_2}$ which maximize the correlation between $y_1$ and $y_2$. Similar to the KPCA algorithm, CCA can be extended and used in a dot product space~$F$ which is related to the input space by a possibly nonlinear map $\Phi:\mathbf{R}^N \rightarrow F$, $x \mapsto \mathbf{X}$ as \begin{eqnarray*} && y_1 = \mathbf{w_1}\mathbf{\Phi(x_1)} = \sum_j w_1 \Phi(x_{1j}) \\ && y_2 = \mathbf{w_2}\mathbf{\Phi(x_2)} = \sum_j w_2 \Phi(x_{2j}) \end{eqnarray*} Following \citep{kernlab:kuss:2003}, the \pkg{kernlab} implementation of a KCCA projects the data vectors on a new coordinate system using KPCA and uses linear CCA to retrieve the correlation coefficients. The \code{kcca} method in \pkg{kernlab} returns an \proglang{S4} object containing the correlation coefficients for each data set and the corresponding correlation along with the kernel used. 
\subsection{Interior point code quadratic optimizer} In many kernel based algorithms, learning implies the minimization of some risk function. Typically we have to deal with quadratic or general convex problems for support vector machines of the type \begin{equation} \begin{array}{ll} \mathrm{minimize} & f(x) \\ \mbox{subject to~} & c_i(x) \leq 0 \mbox{~for all~} i \in [n]. \end{array} \end{equation} $f$ and $c_i$ are convex functions and $n \in \mathbf{N}$. \pkg{kernlab} provides the \proglang{S4} method \code{ipop} implementing an optimizer of the interior point family \citep{kernlab:Vanderbei:1999} which solves the quadratic programming problem \begin{equation} \begin{array}{ll} \mathrm{minimize} & c^\top x+\frac{1}{2}x^\top H x \\ \mbox{subject to~} & b \leq Ax \leq b + r\\ & l \leq x \leq u \\ \end{array} \end{equation} This optimizer can be used in regression, classification, and novelty detection in SVMs. \subsection{Incomplete cholesky decomposition} When dealing with kernel based algorithms, calculating a full kernel matrix should be avoided since it is already a $O(N^2)$ operation. Fortunately, the fact that kernel matrices are positive semidefinite is a strong constraint and good approximations can be found with small computational cost. The Cholesky decomposition factorizes a positive semidefinite $N \times N$ matrix $K$ as $K=ZZ^T$, where $Z$ is an upper triangular $N \times N$ matrix. Exploiting the fact that kernel matrices are usually of low rank, an \emph{incomplete Cholesky decomposition} \citep{kernlab:Wright:1999} finds a matrix $\tilde{Z}$ of size $N \times M$ where $M\ll N$ such that the norm of $K-\tilde{Z}\tilde{Z}^T$ is smaller than a given tolerance $\theta$. The main difference of incomplete Cholesky decomposition to the standard Cholesky decomposition is that pivots which are below a certain threshold are simply skipped. If $L$ is the number of skipped pivots, we obtain a $\tilde{Z}$ with only $M = N - L$ columns. 
The algorithm works by picking a column from $K$ to be added by maximizing a lower bound on the reduction of the error of the approximation. \pkg{kernlab} has an implementation of an incomplete Cholesky factorization called \code{inc.chol} which computes the decomposed matrix $\tilde{Z}$ from the original data for any given kernel without the need to compute a full kernel matrix beforehand. This has the advantage that no full kernel matrix has to be stored in memory. \section{Conclusions} In this paper we described \pkg{kernlab}, a flexible and extensible kernel methods package for \proglang{R} with existing modern kernel algorithms along with tools for constructing new kernel based algorithms. It provides a unified framework for using and creating kernel-based algorithms in \proglang{R} while using all of \proglang{R}'s modern facilities, like \proglang{S4} classes and namespaces. Our aim for the future is to extend the package and add more kernel-based methods as well as kernel relevant tools. Sources and binaries for the latest version of \pkg{kernlab} are available at CRAN\footnote{\url{http://CRAN.R-project.org}} under the GNU Public License. A shorter version of this introduction to the \proglang{R} package \pkg{kernlab} is published as \cite{kernlab:Karatzoglou+Smola+Hornik:2004} in the \emph{Journal of Statistical Software}. 
\bibliography{jss} \end{document} kernlab/MD50000644000175100001440000001547012774406060012233 0ustar hornikusersdfb1bf6e7d4d9c9651f667c257ede4a3 *DESCRIPTION 4bf5b70ad948b31056e6fc5102ddd995 *NAMESPACE 7db9a58cb6e5aeae749727781fe388f5 *R/aobjects.R 0750c9216dfd490ac36814b8b1ae24f2 *R/couplers.R f8e0ac1a792745090fa9a8da65847804 *R/csi.R 89d1c67ac3de8ff1e48de1a2dc79d477 *R/gausspr.R ab289bc31386f29fa9b2bc9a667504f4 *R/inchol.R bfa34b64d293a380c5c4d045105d4496 *R/ipop.R 5f574afe5df7904fb80bb214f01fcc6c *R/kcca.R 67aed700531a0ce066bb9300e7f0169c *R/kernelmatrix.R 10804a9fc1281e6af5ccfe98fcb786c2 *R/kernels.R 4df2eb88a79a9ba527515d471042c5ef *R/kfa.R 894f285bbb8e123968cdfcf88c2363c4 *R/kha.R 87fb64fa9308b0337216933d6aa8cdd4 *R/kkmeans.R 78cd6c834753a4f6c9f2ce570df37aaa *R/kmmd.R 03fc2d9d2bc5e3d2719397c9e1bf137f *R/kpca.R b9d06cfc6866fbcef5d38ad8da944242 *R/kqr.R e83af5d40f0f7e2fcb8dc4ecdee7ebfd *R/ksvm.R 1df633ae0f402d126694715b89472a42 *R/lssvm.R 9a6305a7f6f48b3d5b9897aee24c7a88 *R/onlearn.R fca7e1cdba31a9fe3f89e74c2d5ced3e *R/ranking.R 1df11b3a35b28147563ca4a01286a739 *R/rvm.R 42578bea93efc1ad1488b72c8acec274 *R/sigest.R 159df23cf242faa6b7c1a0feb40bdf6d *R/specc.R e4fc5c22d5a9c0f7e07216b36cf668e0 *build/vignette.rds 3337c53aa7b73d32894a86506559d577 *data/income.rda 07de04d0fb0a7fd3718327fecc293172 *data/musk.rda b56158109c8e982903b795145d831be8 *data/promotergene.rda 3f5ee096125bd5fc3e82b4389a4e3eda *data/reuters.rda dd4ab7c1af302468605387cd7bc85dfa *data/spam.rda 313ea4c0122d767e98d9de3320b51ee4 *data/spirals.rda 56737980f0d20a1f84c90e036593620e *data/ticdata.rda 3343578028d05274271ebf66d9383da7 *inst/CITATION 68fe0d0d842fbc1b217f45934a8edf7a *inst/COPYRIGHTS 0d1b1a09dbb52e3b0e58676170c3ce3d *inst/doc/kernlab.R c4c223d07206b59e2d43a585d07164b1 *inst/doc/kernlab.Rnw 5dc201395b0cc80f9070716954dd12a6 *inst/doc/kernlab.pdf ca7923a78d389602d891a3cf6a5193d9 *man/as.kernelMatrix.Rd c0c282d5b6dd984608a1d5b9c92fe478 *man/couple.Rd e36dc0b16ba570c99ead7a48394dc66d 
*man/csi-class.Rd f87d54c4c4bf47f760cc6a779c7e525d *man/csi.Rd 704bfeedf89329461a20e4cb51a237f0 *man/dots.Rd 285c27b5d9a389dfd7e2f8e392de215c *man/gausspr-class.Rd fd9fe426e55ff79ffa5aabe84abd229c *man/gausspr.Rd b61d371ba2f8d8b137ec3c32a115c3ab *man/inchol-class.Rd f91fdd7d2e3c9aec28d31575d2ba0a6e *man/inchol.Rd 452553ee15225244a50b73aa08cca861 *man/income.Rd 9599ae27d6ebe41302c6236aa381b313 *man/inlearn.Rd bbcfe86bcb66e4b222b9ba13869fa2b0 *man/ipop-class.Rd c2e71c62027e5534eaf1f4c2dbcf0a6a *man/ipop.Rd 62c2b5318bb86222cb8d9cd361998d36 *man/kcca-class.Rd fb5a84011ee5c0fd03287b957379aab7 *man/kcca.Rd ef26a19723ffb7f6eb6dd3539905d6c4 *man/kernel-class.Rd 7357130456764a2b77cbf39d05d8dc98 *man/kernelMatrix.Rd 7a1e2bc5f883b6e7339bd717f0569eaf *man/kfa-class.Rd 22c7587c02310941aa5c484a3551ff70 *man/kfa.Rd 54afaeff97629d4a1353cdd98b5dde37 *man/kha-class.Rd 630bbe5b92f49a6eb501ddd0776fae3b *man/kha.Rd 730086452f568aacc5bea56bb514e2ff *man/kkmeans.Rd c3458139340043b2d63e9a642386582e *man/kmmd-class.Rd 6246385dba8697c83028cbece148c203 *man/kmmd.Rd b39a018897562f1cf907c7d0920186ce *man/kpca-class.Rd ba3a5bde31ea982871c7690edc588b23 *man/kpca.Rd 5a3b2344811fded04018d0b56d9bca23 *man/kqr-class.Rd 1ef59facd1ed13402b663beb16f6593a *man/kqr.Rd 3bdce4dc10887da4bacdac6830e66db8 *man/ksvm-class.Rd f98da25e651db60717100721a7a6f7cc *man/ksvm.Rd dd6a605572b276158f753cf3e3dce63e *man/lssvm-class.Rd bab982b9b6cdbdfa1d9c50cacd72408d *man/lssvm.Rd 95f670451348298d1c5daa00498f9f65 *man/musk.Rd 6d1c014b9f6bb8b59d032fd444bf5a04 *man/onlearn-class.Rd e14a6bd165c9595d1b014bd983d810b5 *man/onlearn.Rd 75f80214439e10c8d1b0104f5bcb44ba *man/plot.Rd f67747838e34ee3400ad4ffe299eba71 *man/prc-class.Rd fb4f0a2a30d3ec62e66a125f64d7f018 *man/predict.gausspr.Rd 69e21e71600ccf8a8df4a1adb84213fe *man/predict.kqr.Rd a92aae4f4aa90adbfc6d9f698426e55c *man/predict.ksvm.Rd 17510c748e43b26899603fff435572fb *man/promotergene.Rd f3a2c50017ea501680b53c9e221bf6b5 *man/ranking-class.Rd 0a26fab5b4dc78f254b408e396aba191 
*man/ranking.Rd 8bee0b6c367f1c5f749b296ff48dcc23 *man/reuters.Rd 2b1f6b6093d9d0a915995b59caf1561d *man/rvm-class.Rd f406be43ad5c7a6d4e2b90c46e42d2a6 *man/rvm.Rd 86c5fd418857bae9a5c736e8c57a5c5e *man/sigest.Rd 38c1b0a597898ffd36fd635af5df2d32 *man/spam.Rd b176c7c0f1edb61818e9ecfde276f349 *man/specc-class.Rd 7c1efb159e6b590600d84151e848aca6 *man/specc.Rd c707c7af1229bdfca87272866bb3199a *man/spirals.Rd 149b3590c24913c3718c9f1d6c265b9a *man/stringdot.Rd 5a3d623ac56f129716429ba87481eaeb *man/ticdata.Rd fa4feb7dd29492877886e4d86d0cb8f4 *man/vm-class.Rd 2a6f9e9e044a78154d3cfda5936d6f48 *src/Makevars 2a6f9e9e044a78154d3cfda5936d6f48 *src/Makevars.win 3b77d80677bb88fb39cab4a7d2351056 *src/brweight.cpp 048d635dbf0db99a0b707bf0a9c06984 *src/brweight.h 50cd06527f816675b128669d222bee56 *src/ctable.cpp cb1e056dfcc52d5319e71981f9c90611 *src/ctable.h 342cbb0568a2fa8f27b1f0c42542737e *src/cweight.cpp 0ede046d861731d10f965e2ff8f50e4e *src/cweight.h 5c02223129df9d548c614acd0593645d *src/datatype.h f085fe8cca3cb634567600216eb4aad2 *src/dbreakpt.c b08bdfd188f69c9ab839895556789d64 *src/dcauchy.c 455ccdeed46ccda0958453306fe9a951 *src/dgpnrm.c c9ae627ea63dec6d72867c2026121648 *src/dgpstep.c 821081c5c42e2a20237abcced03a3a6f *src/dprecond.c 165209b9e9410785dcee940d35d53c05 *src/dprsrch.c 33b02078ecd469dfda0aeb1e5ba98cb2 *src/dspcg.c e13d4f68dd0e3b613f40066c47387233 *src/dtron.c f3c6c30f24ade3e5aa146d0f0a6b11f5 *src/dtrpcg.c 616fbd8165eddace388ffc7ffd90c753 *src/dtrqsol.c beb2c099ff3dd87e3474a30a49a8437e *src/errorcode.h a0f99b7568a3b1c4f0e47437b022e4dc *src/esa.cpp ab96f4b2f43cc0306c88547ab6abe1ad *src/esa.h 5a7166f36e34cc037b9c2006f8bc00c9 *src/expdecayweight.cpp 7f04e95fcd76ee21dcea4d7138d96326 *src/expdecayweight.h d16372bf79ce22a92dfcf3c0d0b769e7 *src/ilcpfactory.h f103b80f529451ab71a425a31ed1eabf *src/inductionsort.cpp fd4a5ad4b79ca119885410bb45c7d12f *src/inductionsort.h 76adf49038c3585cf216cd033a9b4183 *src/introsort.h 0073f847ac8606d19e03cb0eeb27e0a2 *src/isafactory.h 
94245de3f9b29eee07fd1f7d8d8929cd *src/iweightfactory.h d2d7af10799002c2392f038e7d767c3f *src/kspectrumweight.cpp b5d07bb286e3767cda7a371c50d0122e *src/kspectrumweight.h 81884b6e3b3e02f26e75974febbdaa2d *src/lcp.cpp 6de81523902a1d4dce2b38ce3d57ce98 *src/lcp.h f47f3118ea197009f6f0e12edeb5fc17 *src/misc.c d5d113bf04eb7759c8fd0f915dd24c64 *src/msufsort.cpp 82af93b02f090a83152b52239e0e3711 *src/msufsort.h 36b8004ade5fe1c5c2edb01cf74ce5cd *src/solvebqp.c 823808c44b18f59c9eef3ad4f1f41930 *src/stack.h 079a2f29ea98ab6f5ca4e814bb2917ba *src/stringk.c 801972af49fa57499fc3e519d202a8ad *src/stringkernel.cpp 1c19c2215be7a2b25f7439fc061f2daa *src/stringkernel.h ae74f6ea199b5d5b9b4b045afac5fa40 *src/svm.cpp 670301bb88ff2b0f28ece190a96635c7 *src/svm.h 5f5910aab31dc2ebacb4b15caba8e873 *src/wkasailcp.cpp fd6807b3526c7d5442f66a2660bd9e4c *src/wkasailcp.h f48a5df5ecbf1ac1831e5582798eb57d *src/wmsufsort.cpp 2694af88ced7e4391e92120d0c90587c *src/wmsufsort.h a324922cf3b84ae82f364be31135168f *vignettes/A.cls 0bb2f41f77a58dd866a86cd0b164b3c6 *vignettes/jss.bib c4c223d07206b59e2d43a585d07164b1 *vignettes/kernlab.Rnw kernlab/build/0000755000175100001440000000000012774400037013011 5ustar hornikuserskernlab/build/vignette.rds0000644000175100001440000000045412774400037015353 0ustar hornikusersuPQO0.7hLk?`e1Y|=` b)"eHM% ̈ꧻiVC<*G x #ZУ}t;sȐBҭưϨ")4s䟇ji`JX!WC]c<_VڲWkuSUB*+{k XL+)2 ex6 ES+ce_[8'K?Z!p +p֛qğ})ppIڶ;H@Aj; Pekernlab/DESCRIPTION0000644000175100001440000000176612774406060013434 0ustar hornikusersPackage: kernlab Version: 0.9-25 Title: Kernel-Based Machine Learning Lab Authors@R: c(person("Alexandros", "Karatzoglou", role = c("aut", "cre"), email = "alexandros.karatzoglou@gmail.com"), person("Alex", "Smola", role = "aut"), person("Kurt", "Hornik", role = "aut")) Description: Kernel-based machine learning methods for classification, regression, clustering, novelty detection, quantile regression and dimensionality reduction. 
Among other methods 'kernlab' includes Support Vector Machines, Spectral Clustering, Kernel PCA, Gaussian Processes and a QP solver. Depends: R (>= 2.10) Imports: methods, stats, grDevices, graphics LazyLoad: Yes License: GPL-2 NeedsCompilation: yes Packaged: 2016-10-03 06:52:15 UTC; hornik Author: Alexandros Karatzoglou [aut, cre], Alex Smola [aut], Kurt Hornik [aut] Maintainer: Alexandros Karatzoglou Repository: CRAN Date/Publication: 2016-10-03 09:43:44 kernlab/man/0000755000175100001440000000000012560414652012466 5ustar hornikuserskernlab/man/rvm-class.Rd0000644000175100001440000001100211304023134014640 0ustar hornikusers\name{rvm-class} \docType{class} \alias{rvm-class} \alias{RVindex} \alias{mlike} \alias{nvar} \alias{RVindex,rvm-method} \alias{alpha,rvm-method} \alias{cross,rvm-method} \alias{error,rvm-method} \alias{kcall,rvm-method} \alias{kernelf,rvm-method} \alias{kpar,rvm-method} \alias{lev,rvm-method} \alias{mlike,rvm-method} \alias{nvar,rvm-method} \alias{type,rvm-method} \alias{xmatrix,rvm-method} \alias{ymatrix,rvm-method} \title{Class "rvm"} \description{Relevance Vector Machine Class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("rvm", ...)}. or by calling the \code{rvm} function. 
} \section{Slots}{ \describe{ \item{\code{tol}:}{Object of class \code{"numeric"} contains tolerance of termination criteria used.} \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used } \item{\code{kpar}:}{Object of class \code{"list"} contains the hyperparameter used} \item{\code{kcall}:}{Object of class \code{"call"} contains the function call} \item{\code{type}:}{Object of class \code{"character"} contains type of problem} \item{\code{terms}:}{Object of class \code{"ANY"} containing the terms representation of the symbolic model used (when using a formula interface)} \item{\code{xmatrix}:}{Object of class \code{"matrix"} contains the data matrix used during computation} \item{\code{ymatrix}:}{Object of class \code{"output"} contains the response matrix} \item{\code{fitted}:}{Object of class \code{"output"} with the fitted values, (predict on training set).} \item{\code{lev}:}{Object of class \code{"vector"} contains the levels of the response (in classification)} \item{\code{nclass}:}{Object of class \code{"numeric"} contains the number of classes (in classification)} \item{\code{alpha}:}{Object of class \code{"listI"} containing the the resulting alpha vector} \item{\code{coef}:}{Object of class \code{"ANY"} containing the the resulting model parameters} \item{\code{nvar}:}{Object of class \code{"numeric"} containing the calculated variance (in case of regression)} \item{\code{mlike}:}{Object of class \code{"numeric"} containing the computed maximum likelihood} \item{\code{RVindex}:}{Object of class \code{"vector"} containing the indexes of the resulting relevance vectors } \item{\code{nRV}:}{Object of class \code{"numeric"} containing the number of relevance vectors} \item{\code{cross}:}{Object of class \code{"numeric"} containing the resulting cross validation error } \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{n.action}:}{Object of class \code{"ANY"} containing the 
action performed on NA} } } \section{Methods}{ \describe{ \item{RVindex}{\code{signature(object = "rvm")}: returns the index of the relevance vectors } \item{alpha}{\code{signature(object = "rvm")}: returns the resulting alpha vector} \item{cross}{\code{signature(object = "rvm")}: returns the resulting cross validation error} \item{error}{\code{signature(object = "rvm")}: returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values } \item{kcall}{\code{signature(object = "rvm")}: returns the function call } \item{kernelf}{\code{signature(object = "rvm")}: returns the used kernel function } \item{kpar}{\code{signature(object = "rvm")}: returns the parameters of the kernel function} \item{lev}{\code{signature(object = "rvm")}: returns the levels of the response (in classification)} \item{mlike}{\code{signature(object = "rvm")}: returns the estimated maximum likelihood} \item{nvar}{\code{signature(object = "rvm")}: returns the calculated variance (in regression)} \item{type}{\code{signature(object = "rvm")}: returns the type of problem} \item{xmatrix}{\code{signature(object = "rvm")}: returns the data matrix used during computation} \item{ymatrix}{\code{signature(object = "rvm")}: returns the used response } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{rvm}}, \code{\link{ksvm-class}} } \examples{ # create data x <- seq(-20,20,0.1) y <- sin(x)/x + rnorm(401,sd=0.05) # train relevance vector machine foo <- rvm(x, y) foo alpha(foo) RVindex(foo) fitted(foo) kernelf(foo) nvar(foo) ## show slots slotNames(foo) } \keyword{classes} kernlab/man/onlearn.Rd0000644000175100001440000000467612560414652014430 0ustar hornikusers\name{onlearn} \alias{onlearn} \alias{onlearn,onlearn-method} \title{Kernel Online Learning algorithms} \description{ Online Kernel-based Learning algorithms for classification, novelty detection, and regression. 
} \usage{ \S4method{onlearn}{onlearn}(obj, x, y = NULL, nu = 0.2, lambda = 1e-04) } \arguments{ \item{obj}{\code{obj} an object of class \code{onlearn} created by the initialization function \code{inlearn} containing the kernel to be used during learning and the parameters of the learned model} \item{x}{vector or matrix containing the data. Factors have to be numerically coded. If \code{x} is a matrix the code is run internally one sample at the time.} \item{y}{the class label in case of classification. Only binary classification is supported and class labels have to be -1 or +1. } \item{nu}{the parameter similarly to the \code{nu} parameter in SVM bounds the training error.} \item{lambda}{the learning rate} } \details{ The online algorithms are based on a simple stochastic gradient descent method in feature space. The state of the algorithm is stored in an object of class \code{onlearn} and has to be passed to the function at each iteration. } \value{ The function returns an \code{S4} object of class \code{onlearn} containing the model parameters and the last fitted value which can be retrieved by the accessor method \code{fit}. The value returned in the classification and novelty detection problem is the decision function value phi. The accessor methods \code{alpha} returns the model parameters. } \references{ Kivinen J. Smola A.J. Williamson R.C. \cr \emph{Online Learning with Kernels}\cr IEEE Transactions on Signal Processing vol. 
52, Issue 8, 2004\cr \url{http://users.cecs.anu.edu.au/~williams/papers/P172.pdf}} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{inlearn}}} \examples{ ## create toy data set x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2)) y <- matrix(c(rep(1,50),rep(-1,50)),,1) ## initialize onlearn object on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2), type="classification") ind <- sample(1:100,100) ## learn one data point at the time for(i in ind) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) ## or learn all the data on <- onlearn(on,x[ind,],y[ind],nu=0.03,lambda=0.1) sign(predict(on,x)) } \keyword{classif} \keyword{neural} \keyword{regression} \keyword{ts} kernlab/man/kqr.Rd0000644000175100001440000002055212117365752013562 0ustar hornikusers\name{kqr} \alias{kqr} \alias{kqr,formula-method} \alias{kqr,vector-method} \alias{kqr,matrix-method} \alias{kqr,list-method} \alias{kqr,kernelMatrix-method} \alias{coef,kqr-method} \alias{show,kqr-method} \title{Kernel Quantile Regression.} \description{The Kernel Quantile Regression algorithm \code{kqr} performs non-parametric Quantile Regression.} \usage{ \S4method{kqr}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE) \S4method{kqr}{vector}(x,...) \S4method{kqr}{matrix}(x, y, scaled = TRUE, tau = 0.5, C = 0.1, kernel = "rbfdot", kpar = "automatic", reduced = FALSE, rank = dim(x)[1]/6, fit = TRUE, cross = 0, na.action = na.omit) \S4method{kqr}{kernelMatrix}(x, y, tau = 0.5, C = 0.1, fit = TRUE, cross = 0) \S4method{kqr}{list}(x, y, tau = 0.5, C = 0.1, kernel = "strigdot", kpar= list(length=4, C=0.5), fit = TRUE, cross = 0) } \arguments{ \item{x}{e data or a symbolic description of the model to be fit. When not using a formula x can be a matrix or vector containing the training data or a kernel matrix of class \code{kernelMatrix} of the training data or a list of character vectors (for use with the string kernel). 
Note, that the intercept is always excluded, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which \code{kqr} is called from.} \item{y}{a numeric vector or a column matrix containing the response.} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally (both \code{x} and \code{y} variables) to zero mean and unit variance. The center and scale values are returned and used for later predictions. (default: TRUE)} \item{tau}{the quantile to be estimated, this is generally a number strictly between 0 and 1. For 0.5 the median is calculated. (default: 0.5)} \item{C}{the cost regularization parameter. This parameter controls the smoothness of the fitted function, essentially higher values for C lead to less smooth functions.(default: 1)} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. \code{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). 
This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{lenght, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in 'sigest' to calculate a good 'sigma' value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic"). } \item{reduced}{use an incomplete cholesky decomposition to calculate a decomposed form \eqn{Z} of the kernel Matrix \eqn{K} (where \eqn{K = ZZ'}) and perform the calculations with \eqn{Z}. This might be useful when using \code{kqr} with large datasets since normally an n times n kernel matrix would be computed. Setting \code{reduced} to \code{TRUE} makes use of \code{csi} to compute a decomposed form instead and thus only a \eqn{n \times m} matrix where \eqn{m < n} and \eqn{n} the sample size is stored in memory (default: FALSE)} \item{rank}{the rank m of the decomposed matrix calculated when using an incomplete cholesky decomposition. 
This parameter is only taken into account when \code{reduced} is \code{TRUE}(default : dim(x)[1]/6)} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: 'TRUE')} \item{cross}{if a integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Pinball loss and the for quantile regression} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{additional parameters.} } \details{In quantile regression a function is fitted to the data so that it satisfies the property that a portion \eqn{tau} of the data \eqn{y|n} is below the estimate. While the error bars of many regression problems can be viewed as such estimates quantile regression estimates this quantity directly. Kernel quantile regression is similar to nu-Support Vector Regression in that it minimizes a regularized loss function in RKHS. The difference between nu-SVR and kernel quantile regression is in the type of loss function used which in the case of quantile regression is the pinball loss (see reference for details.). Minimizing the regularized loss boils down to a quadratic problem which is solved using an interior point QP solver \code{ipop} implemented in \code{kernlab}. 
} \value{ An S4 object of class \code{kqr} containing the fitted model along with information.Accessor functions can be used to access the slots of the object which include : \item{alpha}{The resulting model parameters which can be also accessed by \code{coef}.} \item{kernelf}{the kernel function used.} \item{error}{Training error (if fit == TRUE)} see \code{kqr-class} for more details. } \references{Ichiro Takeuchi, Quoc V. Le, Timothy D. Sears, Alexander J. Smola\cr \emph{Nonparametric Quantile Estimation}\cr Journal of Machine Learning Research 7,2006,1231-1264 \cr \url{http://www.jmlr.org/papers/volume7/takeuchi06a/takeuchi06a.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{predict.kqr}}, \code{\link{kqr-class}}, \code{\link{ipop}}, \code{\link{rvm}}, \code{\link{ksvm}}} \examples{ # create data x <- sort(runif(300)) y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x))) # first calculate the median qrm <- kqr(x, y, tau = 0.5, C=0.15) # predict and plot plot(x, y) ytest <- predict(qrm, x) lines(x, ytest, col="blue") # calculate 0.9 quantile qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot", kpar= list(sigma=10), C=0.15) ytest <- predict(qrm, x) lines(x, ytest, col="red") # calculate 0.1 quantile qrm <- kqr(x, y, tau = 0.1,C=0.15) ytest <- predict(qrm, x) lines(x, ytest, col="green") # print first 10 model coefficients coef(qrm)[1:10] } \keyword{regression} \keyword{nonlinear} \keyword{methods} kernlab/man/couple.Rd0000644000175100001440000000363211304023134014232 0ustar hornikusers\name{couple} \alias{couple} \title{Probabilities Coupling function} \description{ \code{couple} is used to link class-probability estimates produced by pairwise coupling in multi-class classification problems. } \usage{ couple(probin, coupler = "minpair") } \arguments{ \item{probin}{ The pairwise coupled class-probability estimates} \item{coupler}{The type of coupler to use. 
Currently \code{minpair} and \code{pkpd} and \code{vote} are supported (see reference for more details). If \code{vote} is selected the returned value is a primitive estimate based on the given votes.} } \details{ As binary classification problems are much easier to solve many techniques exist to decompose multi-class classification problems into many binary classification problems (voting, error codes, etc.). Pairwise coupling (one against one) constructs a rule for discriminating between every pair of classes and then selecting the class with the most winning two-class decisions. By using Platt's probabilities output for SVM one can get a class probability for each of the \eqn{k(k-1)/2} models created in the pairwise classification. The couple method implements various techniques to combine these probabilities. } \value{ A matrix with the resulting probability estimates. } \references{ Ting-Fan Wu, Chih-Jen Lin, Ruby C. Weng\cr \emph{Probability Estimates for Multi-class Classification by Pairwise Coupling}\cr Neural Information Processing Symposium 2003 \cr \url{http://books.nips.cc/papers/files/nips16/NIPS2003_0538.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{ \code{\link{predict.ksvm}}, \code{\link{ksvm}}} \examples{ ## create artificial pairwise probabilities pairs <- matrix(c(0.82,0.12,0.76,0.1,0.9,0.05),2) couple(pairs) couple(pairs, coupler="pkpd") couple(pairs, coupler ="vote") } \keyword{classif} kernlab/man/kpca.Rd0000644000175100001440000001207412560414652013677 0ustar hornikusers\name{kpca} \alias{kpca} \alias{kpca,formula-method} \alias{kpca,matrix-method} \alias{kpca,kernelMatrix-method} \alias{kpca,list-method} \alias{predict,kpca-method} \title{Kernel Principal Components Analysis} \description{ Kernel Principal Components Analysis is a nonlinear form of principal component analysis.} \usage{ \S4method{kpca}{formula}(x, data = NULL, na.action, ...) 
\S4method{kpca}{matrix}(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 0, th = 1e-4, na.action = na.omit, ...) \S4method{kpca}{kernelMatrix}(x, features = 0, th = 1e-4, ...) \S4method{kpca}{list}(x, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), features = 0, th = 1e-4, na.action = na.omit, ...) } \arguments{ \item{x}{the data matrix indexed by row or a formula describing the model, or a kernel Matrix of class \code{kernelMatrix}, or a list of character vectors} \item{data}{an optional data frame containing the variables in the model (when using a formula).} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". 
\item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{features}{Number of features (principal components) to return. (default: 0 , all)} \item{th}{the value of the eigenvalue under which principal components are ignored (only valid when features = 0). (default : 0.0001) } \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{Using kernel functions one can efficiently compute principal components in high-dimensional feature spaces, related to input space by some non-linear map.\cr The data can be passed to the \code{kpca} function in a \code{matrix} or a \code{data.frame}, in addition \code{kpca} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used. } \value{ An S4 object containing the principal component vectors along with the corresponding eigenvalues. \item{pcv}{a matrix containing the principal component vectors (column wise)} \item{eig}{The corresponding eigenvalues} \item{rotated}{The original data projected (rotated) on the principal components} \item{xmatrix}{The original data matrix} all the slots of the object can be accessed by accessor functions. } \note{The predict function can be used to embed new data on the new space} \references{ Schoelkopf B., A. Smola, K.-R. 
Mueller :\cr \emph{Nonlinear component analysis as a kernel eigenvalue problem}\cr Neural Computation 10, 1299-1319\cr \url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.29.1366} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kcca}}, \code{pca}} \examples{ # another example using the iris data(iris) test <- sample(1:150,20) kpc <- kpca(~.,data=iris[-test,-5],kernel="rbfdot", kpar=list(sigma=0.2),features=2) #print the principal component vectors pcv(kpc) #plot the data projection on the components plot(rotated(kpc),col=as.integer(iris[-test,5]), xlab="1st Principal Component",ylab="2nd Principal Component") #embed remaining points emb <- predict(kpc,iris[test,-5]) points(emb,col=as.integer(iris[test,5])) } \keyword{cluster} kernlab/man/spirals.Rd0000644000175100001440000000054311304023134014416 0ustar hornikusers\name{spirals} \alias{spirals} \title{Spirals Dataset} \description{A toy data set representing two spirals with Gaussian noise. The data was created with the \code{mlbench.spirals} function in \code{mlbench}. } \usage{data(spirals)} \format{ A matrix with 300 observations and 2 variables. 
} \examples{ data(spirals) plot(spirals) } \keyword{datasets} kernlab/man/predict.gausspr.Rd0000644000175100001440000000416612117365151016076 0ustar hornikusers\name{predict.gausspr} \alias{predict.gausspr} \alias{predict,gausspr-method} \title{predict method for Gaussian Processes object} \description{Prediction of test data using Gaussian Processes} \usage{ \S4method{predict}{gausspr}(object, newdata, type = "response", coupler = "minpair") } \arguments{ \item{object}{an S4 object of class \code{gausspr} created by the \code{gausspr} function} \item{newdata}{a data frame or matrix containing new data} \item{type}{one of \code{response}, \code{probabilities} indicating the type of output: predicted values or matrix of class probabilities} \item{coupler}{Coupling method used in the multiclass case, can be one of \code{minpair} or \code{pkpd} (see reference for more details).} } \value{ \item{response}{predicted classes (the classes with majority vote) or the response value in regression.} \item{probabilities}{matrix of class probabilities (one column for each class and one row for each input).} } \references{ \itemize{ \item C. K. I. Williams and D. Barber \cr Bayesian classification with Gaussian processes. \cr IEEE Transactions on Pattern Analysis and Machine Intelligence, 20(12):1342-1351, 1998\cr \url{http://www.dai.ed.ac.uk/homes/ckiw/postscript/pami_final.ps.gz} \item T.F. Wu, C.J. Lin, R.C. Weng. 
\cr \emph{Probability estimates for Multi-class Classification by Pairwise Coupling}\cr \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svmprob/svmprob.pdf} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \keyword{methods} \keyword{regression} \keyword{classif} \examples{ ## example using the promotergene data set data(promotergene) ## create test and training set ind <- sample(1:dim(promotergene)[1],20) genetrain <- promotergene[-ind, ] genetest <- promotergene[ind, ] ## train a support vector machine gene <- gausspr(Class~.,data=genetrain,kernel="rbfdot", kpar=list(sigma=0.015)) gene ## predict gene type probabilities on the test set genetype <- predict(gene,genetest,type="probabilities") genetype } kernlab/man/kqr-class.Rd0000644000175100001440000001051412117363316014654 0ustar hornikusers\name{kqr-class} \docType{class} \alias{kqr-class} \alias{alpha,kqr-method} \alias{cross,kqr-method} \alias{error,kqr-method} \alias{kcall,kqr-method} \alias{kernelf,kqr-method} \alias{kpar,kqr-method} \alias{param,kqr-method} \alias{alphaindex,kqr-method} \alias{b,kqr-method} \alias{xmatrix,kqr-method} \alias{ymatrix,kqr-method} \alias{scaling,kqr-method} \title{Class "kqr"} \description{The Kernel Quantile Regression object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kqr", ...)}. 
or by calling the \code{kqr} function } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} contains the kernel parameter used } \item{\code{coef}:}{Object of class \code{"ANY"} containing the model parameters} \item{\code{param}:}{Object of class \code{"list"} contains the cost parameter C and tau parameter used } \item{\code{kcall}:}{Object of class \code{"list"} contains the used function call } \item{\code{terms}:}{Object of class \code{"ANY"} contains the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} containing the data matrix used } \item{\code{ymatrix}:}{Object of class \code{"output"} containing the response matrix} \item{\code{fitted}:}{Object of class \code{"output"} containing the fitted values } \item{\code{alpha}:}{Object of class \code{"listI"} containing the computes alpha values } \item{\code{b}:}{Object of class \code{"numeric"} containing the offset of the model.} \item{\code{scaling}}{Object of class \code{"ANY"} containing the scaling coefficients of the data (when case \code{scaled = TRUE} is used).} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross validation error} \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed in NA } \item{\code{nclass}:}{Inherited from class \code{vm}, not used in kqr} \item{\code{lev}:}{Inherited from class \code{vm}, not used in kqr} \item{\code{type}:}{Inherited from class \code{vm}, not used in kqr} } } \section{Methods}{ \describe{ \item{coef}{\code{signature(object = "kqr")}: returns the coefficients (alpha) of the model} \item{alpha}{\code{signature(object = "kqr")}: returns the alpha vector (identical to \code{coef})} \item{b}{\code{signature(object = "kqr")}: returns the 
offset beta of the model.} \item{cross}{\code{signature(object = "kqr")}: returns the cross validation error } \item{error}{\code{signature(object = "kqr")}: returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values } \item{kcall}{\code{signature(object = "kqr")}: returns the call performed} \item{kernelf}{\code{signature(object = "kqr")}: returns the kernel function used} \item{kpar}{\code{signature(object = "kqr")}: returns the kernel parameter used} \item{param}{\code{signature(object = "kqr")}: returns the cost regularization parameter C and tau used} \item{xmatrix}{\code{signature(object = "kqr")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "kqr")}: returns the response matrix used} \item{scaling}{\code{signature(object = "kqr")}: returns the scaling coefficients of the data (when \code{scaled = TRUE} is used)} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kqr}}, \code{\link{vm-class}}, \code{\link{ksvm-class}} } \examples{ # create data x <- sort(runif(300)) y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x))) # first calculate the median qrm <- kqr(x, y, tau = 0.5, C=0.15) # predict and plot plot(x, y) ytest <- predict(qrm, x) lines(x, ytest, col="blue") # calculate 0.9 quantile qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot", kpar = list(sigma = 10), C = 0.15) ytest <- predict(qrm, x) lines(x, ytest, col="red") # print model coefficients and other information coef(qrm) b(qrm) error(qrm) kernelf(qrm) } \keyword{classes} kernlab/man/kfa-class.Rd0000644000175100001440000000371511304023134014611 0ustar hornikusers\name{kfa-class} \docType{class} \alias{kfa-class} \alias{alpha,kfa-method} \alias{alphaindex,kfa-method} \alias{kcall,kfa-method} \alias{kernelf,kfa-method} \alias{predict,kfa-method} \alias{xmatrix,kfa-method} \title{Class "kfa"} \description{The class of the object returned by the Kernel Feature Analysis \code{kfa} 
function} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kfa", ...)} or by calling the \code{kfa} method. The objects contain the features along with the alpha values. } \section{Slots}{ \describe{ \item{\code{alpha}:}{Object of class \code{"matrix"} containing the alpha values } \item{\code{alphaindex}:}{Object of class \code{"vector"} containing the indexes of the selected feature} \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used} \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the selected features} \item{\code{kcall}:}{Object of class \code{"call"} containing the \code{kfa} function call} \item{\code{terms}:}{Object of class \code{"ANY"} containing the formula terms} } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "kfa")}: returns the alpha values } \item{alphaindex}{\code{signature(object = "kfa")}: returns the index of the selected features} \item{kcall}{\code{signature(object = "kfa")}: returns the function call } \item{kernelf}{\code{signature(object = "kfa")}: returns the kernel function used } \item{predict}{\code{signature(object = "kfa")}: used to embed more data points to the feature base} \item{xmatrix}{\code{signature(object = "kfa")}: returns the selected features. 
} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kfa}}, \code{\link{kpca-class}} } \examples{ data(promotergene) f <- kfa(~.,data=promotergene) } \keyword{classes} kernlab/man/kmmd.Rd0000644000175100001440000001223512560414652013710 0ustar hornikusers\name{kmmd} \alias{kmmd} \alias{kmmd,matrix-method} \alias{kmmd,list-method} \alias{kmmd,kernelMatrix-method} \alias{show,kmmd-method} \alias{H0} \alias{Asymbound} \alias{Radbound} \alias{mmdstats} \alias{AsympH0} \title{Kernel Maximum Mean Discrepancy.} \description{The Kernel Maximum Mean Discrepancy \code{kmmd} performs a non-parametric distribution test.} \usage{ \S4method{kmmd}{matrix}(x, y, kernel="rbfdot",kpar="automatic", alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) \S4method{kmmd}{kernelMatrix}(x, y, Kxy, alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 100, frac = 1, ...) \S4method{kmmd}{list}(x, y, kernel="stringdot", kpar = list(type = "spectrum", length = 4), alpha = 0.05, asymptotic = FALSE, replace = TRUE, ntimes = 150, frac = 1, ...) } \arguments{ \item{x}{data values, in a \code{matrix}, \code{list}, or \code{kernelMatrix}} \item{y}{data values, in a \code{matrix}, \code{list}, or \code{kernelMatrix}} \item{Kxy}{\code{kernlMatrix} between \eqn{x} and \eqn{y} values (only for the kernelMatrix interface)} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. 
\code{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{lenght, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in 'sigest' to calculate a good 'sigma' value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic"). 
} \item{alpha}{the confidence level of the test (default: 0.05)} \item{asymptotic}{calculate the bounds asymptotically (suitable for smaller datasets) (default: FALSE)} \item{replace}{use replace when sampling for computing the asymptotic bounds (default : TRUE)} \item{ntimes}{number of times repeating the sampling procedure (default : 150)} \item{frac}{fraction of points to sample (default : 1) } \item{\dots}{additional parameters.} } \details{\code{kmmd} calculates the kernel maximum mean discrepancy for samples from two distributions and conducts a test as to whether the samples are from different distributions with level \code{alpha}. } \value{ An S4 object of class \code{kmmd} containing the results of whether the H0 hypothesis is rejected or not. H0 being that the samples \eqn{x} and \eqn{y} come from the same distribution. The object contains the following slots : \item{\code{H0}}{is H0 rejected (logical)} \item{\code{AsympH0}}{is H0 rejected according to the asymptotic bound (logical)} \item{\code{kernelf}}{the kernel function used.} \item{\code{mmdstats}}{the test statistics (vector of two)} \item{\code{Radbound}}{the Rademacher bound} \item{\code{Asymbound}}{the asymptotic bound} see \code{kmmd-class} for more details. } \references{Gretton, A., K. Borgwardt, M. Rasch, B. Schoelkopf and A. 
Smola\cr \emph{A Kernel Method for the Two-Sample-Problem}\cr Neural Information Processing Systems 2006, Vancouver \cr \url{http://papers.nips.cc/paper/3110-a-kernel-method-for-the-two-sample-problem.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{ksvm}} \examples{ # create data x <- matrix(runif(300),100) y <- matrix(runif(300)+1,100) mmdo <- kmmd(x, y) mmdo } \keyword{htest} \keyword{nonlinear} \keyword{nonparametric} kernlab/man/as.kernelMatrix.Rd0000644000175100001440000000230411304023134016010 0ustar hornikusers\name{as.kernelMatrix} \docType{methods} \alias{kernelMatrix-class} \alias{as.kernelMatrix} \alias{as.kernelMatrix-methods} \alias{as.kernelMatrix,matrix-method} \title{Assign kernelMatrix class to matrix objects} \description{\code{as.kernelMatrix} in package \pkg{kernlab} can be used to coerce the kernelMatrix class to matrix objects representing a kernel matrix. These matrices can then be used with the kernelMatrix interfaces which most of the functions in \pkg{kernlab} support.} \usage{ \S4method{as.kernelMatrix}{matrix}(x, center = FALSE) } \arguments{ \item{x}{matrix to be assigned the \code{kernelMatrix} class } \item{center}{center the kernel matrix in feature space (default: FALSE) } } \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{\code{\link{kernelMatrix}}, \code{\link{dots}}} \keyword{methods} \examples{ ## Create toy data x <- rbind(matrix(rnorm(10),,2),matrix(rnorm(10,mean=3),,2)) y <- matrix(c(rep(1,5),rep(-1,5))) ### Use as.kernelMatrix to label the cov. matrix as a kernel matrix ### which is eq. 
to using a linear kernel K <- as.kernelMatrix(crossprod(t(x))) K svp2 <- ksvm(K, y, type="C-svc") svp2 } kernlab/man/musk.Rd0000644000175100001440000000257011304023134013722 0ustar hornikusers\name{musk} \alias{musk} \docType{data} \title{Musk data set} \description{ This dataset describes a set of 92 molecules of which 47 are judged by human experts to be musks and the remaining 45 molecules are judged to be non-musks. } \usage{data(musk)} \format{ A data frame with 476 observations on the following 167 variables. Variables 1-162 are "distance features" along rays. The distances are measured in hundredths of Angstroms. The distances may be negative or positive, since they are actually measured relative to an origin placed along each ray. The origin was defined by a "consensus musk" surface that is no longer used. Hence, any experiments with the data should treat these feature values as lying on an arbitrary continuous scale. In particular, the algorithm should not make any use of the zero point or the sign of each feature value. Variable 163 is the distance of the oxygen atom in the molecule to a designated point in 3-space. This is also called OXY-DIS. Variable 164 is the X-displacement from the designated point. Variable 165 is the Y-displacement from the designated point. Variable 166 is the Z-displacement from the designated point. 
Class: 0 for non-musk, and 1 for musk } \source{ UCI Machine Learning data repository \cr } \examples{ data(musk) muskm <- ksvm(Class~.,data=musk,kernel="rbfdot",C=1000) muskm } \keyword{datasets} kernlab/man/csi.Rd0000644000175100001440000001231012560414652013530 0ustar hornikusers\name{csi} \docType{methods} \alias{csi} \alias{csi-methods} \alias{csi,matrix-method} \title{Cholesky decomposition with Side Information} \description{ The \code{csi} function in \pkg{kernlab} is an implementation of an incomplete Cholesky decomposition algorithm which exploits side information (e.g., classification labels, regression responses) to compute a low rank decomposition of a kernel matrix from the data. } \usage{ \S4method{csi}{matrix}(x, y, kernel="rbfdot", kpar=list(sigma=0.1), rank, centering = TRUE, kappa = 0.99 ,delta = 40 ,tol = 1e-5) } \arguments{ \item{x}{The data matrix indexed by row} \item{y}{the classification labels or regression responses. In classification y is a \eqn{m \times n} matrix where \eqn{m} the number of data and \eqn{n} the number of classes \eqn{y} and \eqn{y_i} is 1 if the corresponding x belongs to class i.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class \code{kernel}, which computes the inner product in feature space between two vector arguments. 
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. } \item{rank}{maximal rank of the computed kernel matrix} \item{centering}{if \code{TRUE} centering is performed (default: TRUE)} \item{kappa}{trade-off between approximation of K and prediction of Y (default: 0.99)} \item{delta}{number of columns of cholesky performed in advance (default: 40)} \item{tol}{minimum gain at each iteration (default: 1e-5)} } \details{An incomplete cholesky decomposition calculates \eqn{Z} where \eqn{K= ZZ'} \eqn{K} being the kernel matrix. Since the rank of a kernel matrix is usually low, \eqn{Z} tends to be smaller than the complete kernel matrix. 
The decomposed matrix can be used to create memory efficient kernel-based algorithms without the need to compute and store a complete kernel matrix in memory. \cr \code{csi} uses the class labels, or regression responses to compute a more appropriate approximation for the problem at hand considering the additional information from the response variable. } \value{ An S4 object of class "csi" which is an extension of the class "matrix". The object is the decomposed kernel matrix along with the slots : \item{pivots}{Indices on which pivots where done} \item{diagresidues}{Residuals left on the diagonal} \item{maxresiduals}{Residuals picked for pivoting} \item{predgain}{predicted gain before adding each column} \item{truegain}{actual gain after adding each column} \item{Q}{QR decomposition of the kernel matrix} \item{R}{QR decomposition of the kernel matrix} slots can be accessed either by \code{object@slot} or by accessor functions with the same name (e.g., \code{pivots(object))}} \references{ Francis R. Bach, Michael I. 
Jordan\cr \emph{Predictive low-rank decomposition for kernel methods.}\cr Proceedings of the Twenty-second International Conference on Machine Learning (ICML) 2005\cr \url{http://www.di.ens.fr/~fbach/bach_jordan_csi.pdf} } \author{Alexandros Karatzoglou (based on Matlab code by Francis Bach)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{inchol}}, \code{\link{chol}}, \code{\link{csi-class}}} \examples{ data(iris) ## create multidimensional y matrix yind <- t(matrix(1:3,3,150)) ymat <- matrix(0, 150, 3) ymat[yind==as.integer(iris[,5])] <- 1 datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- csi(datamatrix,ymat, kernel=rbf, rank = 30) dim(Z) pivots(Z) # calculate kernel matrix K <- crossprod(t(Z)) # difference between approximated and real kernel matrix (K - kernelMatrix(kernel=rbf, datamatrix))[6,] } \keyword{methods} \keyword{algebra} \keyword{array} kernlab/man/ksvm-class.Rd0000644000175100001440000001532112117364353015042 0ustar hornikusers\name{ksvm-class} \docType{class} \alias{ksvm-class} \alias{SVindex} \alias{alphaindex} \alias{prob.model} \alias{scaling} \alias{prior} \alias{show} \alias{param} \alias{b} \alias{obj} \alias{nSV} \alias{coef,vm-method} \alias{SVindex,ksvm-method} \alias{alpha,ksvm-method} \alias{alphaindex,ksvm-method} \alias{cross,ksvm-method} \alias{error,ksvm-method} \alias{param,ksvm-method} \alias{fitted,ksvm-method} \alias{prior,ksvm-method} \alias{prob.model,ksvm-method} \alias{kernelf,ksvm-method} \alias{kpar,ksvm-method} \alias{lev,ksvm-method} \alias{kcall,ksvm-method} \alias{scaling,ksvm-method} \alias{type,ksvm-method} \alias{xmatrix,ksvm-method} \alias{ymatrix,ksvm-method} \alias{b,ksvm-method} \alias{obj,ksvm-method} \alias{nSV,ksvm-method} \title{Class "ksvm" } \description{An S4 class containing the output (model) of the \code{ksvm} Support Vector Machines function } \section{Objects from the Class}{ Objects can be created by calls of the form 
\code{new("ksvm", ...)} or by calls to the \code{ksvm} function. } \section{Slots}{ \describe{ \item{\code{type}:}{Object of class \code{"character"} containing the support vector machine type ("C-svc", "nu-svc", "C-bsvc", "spoc-svc", "one-svc", "eps-svr", "nu-svr", "eps-bsvr")} \item{\code{param}:}{Object of class \code{"list"} containing the Support Vector Machine parameters (C, nu, epsilon)} \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel function parameters (hyperparameters)} \item{\code{kcall}:}{Object of class \code{"ANY"} containing the \code{ksvm} function call} \item{\code{scaling}:}{Object of class \code{"ANY"} containing the scaling information performed on the data} \item{\code{terms}:}{Object of class \code{"ANY"} containing the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} (\code{"list"} for multiclass problems or \code{"matrix"} for binary classification and regression problems) containing the support vectors calculated from the data matrix used during computations (possibly scaled and without NA). In the case of multi-class classification each list entry contains the support vectors from each binary classification problem from the one-against-one method.} \item{\code{ymatrix}:}{Object of class \code{"output"} the response \code{"matrix"} or \code{"factor"} or \code{"vector"} or \code{"logical"}} \item{\code{fitted}:}{Object of class \code{"output"} with the fitted values, predictions using the training set.} \item{\code{lev}:}{Object of class \code{"vector"} with the levels of the response (in the case of classification)} \item{\code{prob.model}:}{Object of class \code{"list"} with the class prob. 
model} \item{\code{prior}:}{Object of class \code{"list"} with the prior of the training set} \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in the case of classification)} \item{\code{alpha}:}{Object of class \code{"listI"} containing the resulting alpha vector (\code{"list"} or \code{"matrix"} in case of multiclass classification) (support vectors)} \item{\code{coef}:}{Object of class \code{"ANY"} containing the resulting coefficients} \item{\code{alphaindex}:}{Object of class \code{"list"} containing the indexes of the support vectors} \item{\code{b}:}{Object of class \code{"numeric"} containing the resulting offset } \item{\code{SVindex}:}{Object of class \code{"vector"} containing the indexes of the support vectors} \item{\code{nSV}:}{Object of class \code{"numeric"} containing the number of support vectors } \item{\code{obj}:}{Object of class \code{vector} containing the value of the objective function. When using one-against-one in multiclass classification this is a vector.} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross-validation error } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed for NA } } } \section{Methods}{ \describe{ \item{SVindex}{\code{signature(object = "ksvm")}: return the indexes of support vectors} \item{alpha}{\code{signature(object = "ksvm")}: returns the complete alpha vector (with zero values)} \item{alphaindex}{\code{signature(object = "ksvm")}: returns the indexes of non-zero alphas (support vectors)} \item{cross}{\code{signature(object = "ksvm")}: returns the cross-validation error } \item{error}{\code{signature(object = "ksvm")}: returns the training error } \item{obj}{\code{signature(object = "ksvm")}: returns the value of the objective function} \item{fitted}{\code{signature(object = "vm")}: returns the fitted values (predict on training set) } 
\item{kernelf}{\code{signature(object = "ksvm")}: returns the kernel function} \item{kpar}{\code{signature(object = "ksvm")}: returns the kernel parameters (hyperparameters)} \item{lev}{\code{signature(object = "ksvm")}: returns the levels in case of classification } \item{prob.model}{\code{signature(object="ksvm")}: returns class prob. model values} \item{param}{\code{signature(object="ksvm")}: returns the parameters of the SVM in a list (C, epsilon, nu etc.)} \item{prior}{\code{signature(object="ksvm")}: returns the prior of the training set} \item{kcall}{\code{signature(object="ksvm")}: returns the \code{ksvm} function call} \item{scaling}{\code{signature(object = "ksvm")}: returns the scaling values } \item{show}{\code{signature(object = "ksvm")}: prints the object information} \item{type}{\code{signature(object = "ksvm")}: returns the problem type} \item{xmatrix}{\code{signature(object = "ksvm")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "ksvm")}: returns the response vector} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzolgou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm}}, \code{\link{rvm-class}}, \code{\link{gausspr-class}} } \examples{ ## simple example using the promotergene data set data(promotergene) ## train a support vector machine gene <- ksvm(Class~.,data=promotergene,kernel="rbfdot", kpar=list(sigma=0.015),C=50,cross=4) gene # the kernel function kernelf(gene) # the alpha values alpha(gene) # the coefficients coef(gene) # the fitted values fitted(gene) # the cross validation error cross(gene) } \keyword{classes} kernlab/man/csi-class.Rd0000644000175100001440000000545411304023134014630 0ustar hornikusers\name{csi-class} \docType{class} \alias{csi-class} \alias{Q} \alias{R} \alias{predgain} \alias{truegain} \alias{diagresidues,csi-method} \alias{maxresiduals,csi-method} \alias{pivots,csi-method} \alias{predgain,csi-method} \alias{truegain,csi-method} \alias{Q,csi-method} \alias{R,csi-method} \title{Class 
"csi"} \description{The reduced Cholesky decomposition object} \section{Objects from the Class}{Objects can be created by calls of the form \code{new("csi", ...)} or by calling the \code{csi} function.} \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"matrix"} contains the decomposed matrix} \item{\code{pivots}:}{Object of class \code{"vector"} contains the pivots performed} \item{\code{diagresidues}:}{Object of class \code{"vector"} contains the diagonal residues} \item{\code{maxresiduals}:}{Object of class \code{"vector"} contains the maximum residues} \item{predgain}{Object of class \code{"vector"} contains the predicted gain before adding each column} \item{truegain}{Object of class \code{"vector"} contains the actual gain after adding each column} \item{Q}{Object of class \code{"matrix"} contains Q from the QR decomposition of the kernel matrix} \item{R}{Object of class \code{"matrix"} contains R from the QR decomposition of the kernel matrix} } } \section{Extends}{ Class \code{"matrix"}, directly. 
} \section{Methods}{ \describe{ \item{diagresidues}{\code{signature(object = "csi")}: returns the diagonal residues} \item{maxresiduals}{\code{signature(object = "csi")}: returns the maximum residues} \item{pivots}{\code{signature(object = "csi")}: returns the pivots performed} \item{predgain}{\code{signature(object = "csi")}: returns the predicted gain before adding each column} \item{truegain}{\code{signature(object = "csi")}: returns the actual gain after adding each column} \item{Q}{\code{signature(object = "csi")}: returns Q from the QR decomposition of the kernel matrix} \item{R}{\code{signature(object = "csi")}: returns R from the QR decomposition of the kernel matrix} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{csi}}, \code{\link{inchol-class}}} \examples{ data(iris) ## create multidimensional y matrix yind <- t(matrix(1:3,3,150)) ymat <- matrix(0, 150, 3) ymat[yind==as.integer(iris[,5])] <- 1 datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- csi(datamatrix,ymat, kernel=rbf, rank = 30) dim(Z) pivots(Z) # calculate kernel matrix K <- crossprod(t(Z)) # difference between approximated and real kernel matrix (K - kernelMatrix(kernel=rbf, datamatrix))[6,] } \keyword{classes} kernlab/man/vm-class.Rd0000644000175100001440000000732511304023134014473 0ustar hornikusers\name{vm-class} \docType{class} \alias{vm-class} \alias{cross} \alias{alpha} \alias{error} \alias{type} \alias{kernelf} \alias{xmatrix} \alias{ymatrix} \alias{lev} \alias{kcall} \alias{alpha,vm-method} \alias{cross,vm-method} \alias{error,vm-method} \alias{fitted,vm-method} \alias{kernelf,vm-method} \alias{kpar,vm-method} \alias{lev,vm-method} \alias{kcall,vm-method} \alias{type,vm-method} \alias{xmatrix,vm-method} \alias{ymatrix,vm-method} \title{Class "vm" } \description{An S4 VIRTUAL class used as a base for the various vector machine classes in \pkg{kernlab}} \section{Objects from the 
Class}{ Objects from the class cannot be created directly but only contained in other classes. } \section{Slots}{ \describe{ \item{\code{alpha}:}{Object of class \code{"listI"} containing the resulting alpha vector (list in case of multiclass classification) (support vectors)} \item{\code{type}:}{Object of class \code{"character"} containing the vector machine type e.g., ("C-svc", "nu-svc", "C-bsvc", "spoc-svc", "one-svc", "eps-svr", "nu-svr", "eps-bsvr")} \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel function parameters (hyperparameters)} \item{\code{kcall}:}{Object of class \code{"call"} containing the function call} \item{\code{terms}:}{Object of class \code{"ANY"} containing the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} the data matrix used during computations (support vectors) (possibly scaled and without NA)} \item{\code{ymatrix}:}{Object of class \code{"output"} the response matrix/vector } \item{\code{fitted}:}{Object of class \code{"output"} with the fitted values, predictions using the training set.} \item{\code{lev}:}{Object of class \code{"vector"} with the levels of the response (in the case of classification)} \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in the case of classification)} \item{\code{error}:}{Object of class \code{"vector"} containing the training error} \item{\code{cross}:}{Object of class \code{"vector"} containing the cross-validation error } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed for NA } } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "vm")}: returns the complete alpha vector (with zero values)} \item{cross}{\code{signature(object = "vm")}: returns the cross-validation error } \item{error}{\code{signature(object = "vm")}: 
returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values (predict on training set) } \item{kernelf}{\code{signature(object = "vm")}: returns the kernel function} \item{kpar}{\code{signature(object = "vm")}: returns the kernel parameters (hyperparameters)} \item{lev}{\code{signature(object = "vm")}: returns the levels in case of classification } \item{kcall}{\code{signature(object="vm")}: returns the function call} \item{type}{\code{signature(object = "vm")}: returns the problem type} \item{xmatrix}{\code{signature(object = "vm")}: returns the data matrix used(support vectors)} \item{ymatrix}{\code{signature(object = "vm")}: returns the response vector} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzolgou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm-class}}, \code{\link{rvm-class}}, \code{\link{gausspr-class}} } \keyword{classes} kernlab/man/ticdata.Rd0000644000175100001440000002013411304023134014350 0ustar hornikusers\name{ticdata} \alias{ticdata} \title{The Insurance Company Data} \description{ This data set used in the CoIL 2000 Challenge contains information on customers of an insurance company. The data consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was collected to answer the following question: Can you predict who would be interested in buying a caravan insurance policy and give an explanation why ? } \usage{data(ticdata)} \format{ ticdata: Dataset to train and validate prediction models and build a description (9822 customer records). Each record consists of 86 attributes, containing sociodemographic data (attribute 1-43) and product ownership (attributes 44-86). The sociodemographic data is derived from zip codes. All customers living in areas with the same zip code have the same sociodemographic attributes. Attribute 86, \code{CARAVAN:Number of mobile home policies}, is the target variable. 
Data Format \tabular{rlll}{ \tab 1 \tab \code{STYPE} \tab Customer Subtype\cr \tab 2 \tab \code{MAANTHUI} \tab Number of houses 1 - 10\cr \tab 3 \tab \code{MGEMOMV} \tab Avg size household 1 - 6\cr \tab 4 \tab \code{MGEMLEEF} \tab Average age\cr \tab 5 \tab \code{MOSHOOFD} \tab Customer main type\cr \tab 6 \tab \code{MGODRK} \tab Roman catholic \cr \tab 7 \tab \code{MGODPR} \tab Protestant ... \cr \tab 8 \tab \code{MGODOV} \tab Other religion \cr \tab 9 \tab \code{MGODGE} \tab No religion \cr \tab 10 \tab \code{MRELGE} \tab Married \cr \tab 11 \tab \code{MRELSA} \tab Living together \cr \tab 12 \tab \code{MRELOV} \tab Other relation \cr \tab 13 \tab \code{MFALLEEN} \tab Singles \cr \tab 14 \tab \code{MFGEKIND} \tab Household without children \cr \tab 15 \tab \code{MFWEKIND} \tab Household with children \cr \tab 16 \tab \code{MOPLHOOG} \tab High level education \cr \tab 17 \tab \code{MOPLMIDD} \tab Medium level education \cr \tab 18 \tab \code{MOPLLAAG} \tab Lower level education \cr \tab 19 \tab \code{MBERHOOG} \tab High status \cr \tab 20 \tab \code{MBERZELF} \tab Entrepreneur \cr \tab 21 \tab \code{MBERBOER} \tab Farmer \cr \tab 22 \tab \code{MBERMIDD} \tab Middle management \cr \tab 23 \tab \code{MBERARBG} \tab Skilled labourers \cr \tab 24 \tab \code{MBERARBO} \tab Unskilled labourers \cr \tab 25 \tab \code{MSKA} \tab Social class A \cr \tab 26 \tab \code{MSKB1} \tab Social class B1 \cr \tab 27 \tab \code{MSKB2} \tab Social class B2 \cr \tab 28 \tab \code{MSKC} \tab Social class C \cr \tab 29 \tab \code{MSKD} \tab Social class D \cr \tab 30 \tab \code{MHHUUR} \tab Rented house \cr \tab 31 \tab \code{MHKOOP} \tab Home owners \cr \tab 32 \tab \code{MAUT1} \tab 1 car \cr \tab 33 \tab \code{MAUT2} \tab 2 cars \cr \tab 34 \tab \code{MAUT0} \tab No car \cr \tab 35 \tab \code{MZFONDS} \tab National Health Service \cr \tab 36 \tab \code{MZPART} \tab Private health insurance \cr \tab 37 \tab \code{MINKM30} \tab Income >30.000 \cr \tab 38 \tab \code{MINK3045} \tab Income 
30-45.000 \cr \tab 39 \tab \code{MINK4575} \tab Income 45-75.000 \cr \tab 40 \tab \code{MINK7512} \tab Income 75-122.000 \cr \tab 41 \tab \code{MINK123M} \tab Income <123.000 \cr \tab 42 \tab \code{MINKGEM} \tab Average income \cr \tab 43 \tab \code{MKOOPKLA} \tab Purchasing power class \cr \tab 44 \tab \code{PWAPART} \tab Contribution private third party insurance \cr \tab 45 \tab \code{PWABEDR} \tab Contribution third party insurance (firms) \cr \tab 46 \tab \code{PWALAND} \tab Contribution third party insurance (agriculture) \cr \tab 47 \tab \code{PPERSAUT} \tab Contribution car policies \cr \tab 48 \tab \code{PBESAUT} \tab Contribution delivery van policies \cr \tab 49 \tab \code{PMOTSCO} \tab Contribution motorcycle/scooter policies \cr \tab 50 \tab \code{PVRAAUT} \tab Contribution lorry policies \cr \tab 51 \tab \code{PAANHANG} \tab Contribution trailer policies \cr \tab 52 \tab \code{PTRACTOR} \tab Contribution tractor policies \cr \tab 53 \tab \code{PWERKT} \tab Contribution agricultural machines policies \cr \tab 54 \tab \code{PBROM} \tab Contribution moped policies \cr \tab 55 \tab \code{PLEVEN} \tab Contribution life insurances \cr \tab 56 \tab \code{PPERSONG} \tab Contribution private accident insurance policies \cr \tab 57 \tab \code{PGEZONG} \tab Contribution family accidents insurance policies \cr \tab 58 \tab \code{PWAOREG} \tab Contribution disability insurance policies \cr \tab 59 \tab \code{PBRAND} \tab Contribution fire policies \cr \tab 60 \tab \code{PZEILPL} \tab Contribution surfboard policies \cr \tab 61 \tab \code{PPLEZIER} \tab Contribution boat policies \cr \tab 62 \tab \code{PFIETS} \tab Contribution bicycle policies \cr \tab 63 \tab \code{PINBOED} \tab Contribution property insurance policies \cr \tab 64 \tab \code{PBYSTAND} \tab Contribution social security insurance policies \cr \tab 65 \tab \code{AWAPART} \tab Number of private third party insurance 1 - 12 \cr \tab 66 \tab \code{AWABEDR} \tab Number of third party insurance (firms) 
... \cr \tab 67 \tab \code{AWALAND} \tab Number of third party insurance (agriculture) \cr \tab 68 \tab \code{APERSAUT} \tab Number of car policies \cr \tab 69 \tab \code{ABESAUT} \tab Number of delivery van policies \cr \tab 70 \tab \code{AMOTSCO} \tab Number of motorcycle/scooter policies \cr \tab 71 \tab \code{AVRAAUT} \tab Number of lorry policies \cr \tab 72 \tab \code{AAANHANG} \tab Number of trailer policies \cr \tab 73 \tab \code{ATRACTOR} \tab Number of tractor policies \cr \tab 74 \tab \code{AWERKT} \tab Number of agricultural machines policies \cr \tab 75 \tab \code{ABROM} \tab Number of moped policies \cr \tab 76 \tab \code{ALEVEN} \tab Number of life insurances \cr \tab 77 \tab \code{APERSONG} \tab Number of private accident insurance policies \cr \tab 78 \tab \code{AGEZONG} \tab Number of family accidents insurance policies \cr \tab 79 \tab \code{AWAOREG} \tab Number of disability insurance policies \cr \tab 80 \tab \code{ABRAND} \tab Number of fire policies \cr \tab 81 \tab \code{AZEILPL} \tab Number of surfboard policies \cr \tab 82 \tab \code{APLEZIER} \tab Number of boat policies \cr \tab 83 \tab \code{AFIETS} \tab Number of bicycle policies \cr \tab 84 \tab \code{AINBOED} \tab Number of property insurance policies \cr \tab 85 \tab \code{ABYSTAND} \tab Number of social security insurance policies \cr \tab 86 \tab \code{CARAVAN} \tab Number of mobile home policies 0 - 1 \cr } Note: All the variables starting with M are zipcode variables. They give information on the distribution of that variable, e.g., Rented house, in the zipcode area of the customer. } \details{ Information about the insurance company customers consists of 86 variables and includes product usage data and socio-demographic data derived from zip area codes. The data was supplied by the Dutch data mining company Sentient Machine Research and is based on a real world business problem. 
The training set contains over 5000 descriptions of customers, including the information of whether or not they have a caravan insurance policy. The test set contains 4000 customers. The test and data set are merged in the ticdata set. More information about the data set and the CoIL 2000 Challenge along with publications based on the data set can be found at \url{http://www.liacs.nl/~putten/library/cc2000/}. } \source{ \itemize{ \item UCI KDD Archive:\url{http://kdd.ics.uci.edu} \item Donor: Sentient Machine Research \cr Peter van der Putten \cr Sentient Machine Research \cr Baarsjesweg 224 \cr 1058 AA Amsterdam \cr The Netherlands \cr +31 20 6186927 \cr pvdputten@hotmail.com, putten@liacs.nl } } \references{Peter van der Putten, Michel de Ruiter, Maarten van Someren \emph{CoIL Challenge 2000 Tasks and Results: Predicting and Explaining Caravan Policy Ownership}\cr \url{http://www.liacs.nl/~putten/library/cc2000/}} \keyword{datasets} kernlab/man/inlearn.Rd0000644000175100001440000000600712117362575014414 0ustar hornikusers\name{inlearn} \alias{inlearn} \alias{inlearn,numeric-method} \title{Onlearn object initialization} \description{ Online Kernel Algorithm object \code{onlearn} initialization function. } \usage{ \S4method{inlearn}{numeric}(d, kernel = "rbfdot", kpar = list(sigma = 0.1), type = "novelty", buffersize = 1000) } \arguments{ \item{d}{the dimensionality of the data to be learned} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. 
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. For valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the \code{kpar} parameter as well.} \item{type}{the type of problem to be learned by the online algorithm : \code{classification}, \code{regression}, \code{novelty}} \item{buffersize}{the size of the buffer to be used} } \details{ The \code{inlearn} is used to initialize a blank \code{onlearn} object. } \value{ The function returns an \code{S4} object of class \code{onlearn} that can be used by the \code{onlearn} function. 
} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{onlearn}}, \code{\link{onlearn-class}} } \examples{ ## create toy data set x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2)) y <- matrix(c(rep(1,50),rep(-1,50)),,1) ## initialize onlearn object on <- inlearn(2, kernel = "rbfdot", kpar = list(sigma = 0.2), type = "classification") ## learn one data point at the time for(i in sample(1:100,100)) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) sign(predict(on,x)) } \keyword{classif} \keyword{neural} \keyword{regression} \keyword{ts} kernlab/man/prc-class.Rd0000644000175100001440000000353311304023134014632 0ustar hornikusers\name{prc-class} \docType{class} \alias{prc-class} \alias{eig} \alias{pcv} \alias{eig,prc-method} \alias{kcall,prc-method} \alias{kernelf,prc-method} \alias{pcv,prc-method} \alias{xmatrix,prc-method} \title{Class "prc"} \description{Principal Components Class} \section{Objects of class "prc"}{Objects from the class cannot be created directly but only contained in other classes.} \section{Slots}{ \describe{ \item{\code{pcv}:}{Object of class \code{"matrix"} containing the principal component vectors } \item{\code{eig}:}{Object of class \code{"vector"} containing the corresponding eigenvalues} \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters used } \item{\code{xmatrix}:}{Object of class \code{"input"} containing the data matrix used } \item{\code{kcall}:}{Object of class \code{"ANY"} containing the function call } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{eig}{\code{signature(object = "prc")}: returns the eigenvalues } \item{kcall}{\code{signature(object = "prc")}: returns the performed call} \item{kernelf}{\code{signature(object = "prc")}: returns the used kernel function} 
\item{pcv}{\code{signature(object = "prc")}: returns the principal component vectors } \item{predict}{\code{signature(object = "prc")}: embeds new data } \item{xmatrix}{\code{signature(object = "prc")}: returns the used data matrix } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kpca-class}},\code{\link{kha-class}}, \code{\link{kfa-class}} } \keyword{classes} kernlab/man/promotergene.Rd0000644000175100001440000000310712117365235015464 0ustar hornikusers\name{promotergene} \alias{promotergene} \docType{data} \title{E. coli promoter gene sequences (DNA)} \description{ Promoters have a region where a protein (RNA polymerase) must make contact and the helical DNA sequence must have a valid conformation so that the two pieces of the contact region spatially align. The data contains DNA sequences of promoters and non-promoters. } \usage{data(promotergene)} \format{ A data frame with 106 observations and 58 variables. The first variable \code{Class} is a factor with levels \code{+} for a promoter gene and \code{-} for a non-promoter gene. The remaining 57 variables \code{V2 to V58} are factors describing the sequence. The DNA bases are coded as follows: \code{a} adenine \code{c} cytosine \code{g} guanine \code{t} thymine } \source{ UCI Machine Learning data repository \cr \url{ftp://ftp.ics.uci.edu/pub/machine-learning-databases/molecular-biology/promoter-gene-sequences} } \references{ Towell, G., Shavlik, J. and Noordewier, M. 
\cr \emph{Refinement of Approximate Domain Theories by Knowledge-Based Artificial Neural Networks.} \cr In Proceedings of the Eighth National Conference on Artificial Intelligence (AAAI-90) } \examples{ data(promotergene) ## Create classification model using Gaussian Processes prom <- gausspr(Class~.,data=promotergene,kernel="rbfdot", kpar=list(sigma=0.02),cross=4) prom ## Create model using Support Vector Machines promsv <- ksvm(Class~.,data=promotergene,kernel="laplacedot", kpar="automatic",C=60,cross=4) promsv } \keyword{datasets} kernlab/man/stringdot.Rd0000644000175100001440000000631111304023134014755 0ustar hornikusers\name{stringdot} \alias{stringdot} \title{String Kernel Functions} \description{ String kernels. } \usage{ stringdot(length = 4, lambda = 1.1, type = "spectrum", normalized = TRUE) } \arguments{ \item{length}{The length of the substrings considered} \item{lambda}{The decay factor} \item{type}{Type of string kernel, currently the following kernels are supported : \cr \code{spectrum} the kernel considers only matching substrings of exactly length \eqn{n} (also known as string kernel). Each such matching substring is given a constant weight. The length parameter in this kernel has to be \eqn{length > 1}.\cr \code{boundrange} this kernel (also known as boundrange) considers only matching substrings of length less than or equal to a given number N. This type of string kernel requires a length parameter \eqn{length > 1}\cr \code{constant} The kernel considers all matching substrings and assigns constant weight (e.g. 1) to each of them. This \code{constant} kernel does not require any additional parameter.\cr \code{exponential} Exponential Decay kernel where the substring weight decays as the matching substring gets longer. 
The kernel requires a decay factor \eqn{ \lambda > 1}\cr \code{string} essentially identical to the spectrum kernel, only computed using a more conventional way.\cr \code{fullstring} essentially identical to the boundrange kernel only computed in a more conventional way. \cr } \item{normalized}{normalize string kernel values, (default: \code{TRUE})} } \details{ The kernel generating functions are used to initialize a kernel function which calculates the dot (inner) product between two feature vectors in a Hilbert Space. These functions or their function generating names can be passed as a \code{kernel} argument on almost all functions in \pkg{kernlab}(e.g., \code{ksvm}, \code{kpca} etc.). The string kernels calculate similarities between two strings (e.g. texts or sequences) by matching the common substring in the strings. Different types of string kernel exists and are mainly distinguished by how the matching is performed i.e. some string kernels count the exact matchings of \eqn{n} characters (spectrum kernel) between the strings, others allow gaps (mismatch kernel) etc. } \value{ Returns an S4 object of class \code{stringkernel} which extents the \code{function} class. The resulting function implements the given kernel calculating the inner (dot) product between two character vectors. \item{kpar}{a list containing the kernel parameters (hyperparameters) used.} The kernel parameters can be accessed by the \code{kpar} function. } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \note{ The \code{spectrum} and \code{boundrange} kernel are faster and more efficient implementations of the \code{string} and \code{fullstring} kernels which will be still included in \code{kernlab} for the next two versions. 
} \seealso{ \code{\link{dots} }, \code{\link{kernelMatrix} }, \code{\link{kernelMult}}, \code{\link{kernelPol}}} \examples{ sk <- stringdot(type="string", length=5) sk } \keyword{symbolmath} kernlab/man/ranking.Rd0000644000175100001440000001251512117365427014415 0ustar hornikusers\name{ranking} \alias{ranking} \alias{ranking,matrix-method} \alias{ranking,list-method} \alias{ranking,kernelMatrix-method} \title{Ranking} \description{ A universal ranking algorithm which assigns importance/ranking to data points given a query. } \usage{ \S4method{ranking}{matrix}(x, y, kernel ="rbfdot", kpar = list(sigma = 1), scale = FALSE, alpha = 0.99, iterations = 600, edgegraph = FALSE, convergence = FALSE ,...) \S4method{ranking}{kernelMatrix}(x, y, alpha = 0.99, iterations = 600, convergence = FALSE,...) \S4method{ranking}{list}(x, y, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 0.99, iterations = 600, convergence = FALSE, ...) } \arguments{ \item{x}{a matrix containing the data to be ranked, or the kernel matrix of data to be ranked or a list of character vectors} \item{y}{The index of the query point in the data matrix or a vector of length equal to the rows of the data matrix having a one at the index of the query points index and zero at all the other points.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. 
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. For valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{scale}{If TRUE the data matrix columns are scaled to zero mean and unit variance.} \item{alpha}{ The \code{alpha} parameter takes values between 0 and 1 and is used to control the authoritative scores received from the unlabeled points. 
For 0, no global structure is found and the algorithm ranks the points similarly to the original distance metric.} \item{iterations}{Maximum number of iterations} \item{edgegraph}{Construct edgegraph (only supported with the RBF kernel)} \item{convergence}{Include convergence matrix in results} \item{\dots}{Additional arguments} } \details{ A simple universal ranking algorithm which exploits the intrinsic global geometric structure of the data. In many real world applications this should be superior to a local method in which the data are simply ranked by pairwise Euclidean distances. Firstly a weighted network is defined on the data and an authoritative score is assigned to each query. The query points act as source nodes that continually pump their authoritative scores to the remaining points via the weighted network and the remaining points further spread the scores they received to their neighbors. This spreading process is repeated until convergence and the points are ranked according to their score at the end of the iterations. } \value{ An S4 object of class \code{ranking} which extends the \code{matrix} class. The first column of the returned matrix contains the original index of the points in the data matrix, the second column contains the final score received by each point and the third column the ranking of the point. The object contains the following slots : \item{edgegraph}{Containing the edgegraph of the data points. } \item{convergence}{Containing the convergence matrix} } \references{ D. Zhou, J. Weston, A. Gretton, O. Bousquet, B. Schoelkopf \cr \emph{Ranking on Data Manifolds}\cr Advances in Neural Information Processing Systems 16.\cr MIT Press Cambridge Mass. 
2004 \cr \url{http://www.kyb.mpg.de/publications/pdfs/pdf2334.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ranking-class}}, \code{\link{specc}} } \examples{ data(spirals) ## create data from spirals ran <- spirals[rowSums(abs(spirals) < 0.55) == 2,] ## rank points according to similarity to the most upper left point ranked <- ranking(ran, 54, kernel = "rbfdot", kpar = list(sigma = 100), edgegraph = TRUE) ranked[54, 2] <- max(ranked[-54, 2]) c<-1:86 op <- par(mfrow = c(1, 2),pty="s") plot(ran) plot(ran, cex=c[ranked[,3]]/40) } \keyword{cluster} \keyword{classif} kernlab/man/dots.Rd0000644000175100001440000001005711304023134013713 0ustar hornikusers\name{dots} \alias{dots} \alias{kernels} \alias{rbfdot} \alias{polydot} \alias{tanhdot} \alias{vanilladot} \alias{laplacedot} \alias{besseldot} \alias{anovadot} \alias{fourierdot} \alias{splinedot} \alias{kpar} \alias{kfunction} \alias{show,kernel-method} \title{Kernel Functions} \description{ The kernel generating functions provided in kernlab. \cr The Gaussian RBF kernel \eqn{k(x,x') = \exp(-\sigma \|x - x'\|^2)} \cr The Polynomial kernel \eqn{k(x,x') = (scale <x, x'> + offset)^{degree}}\cr The Linear kernel \eqn{k(x,x') = <x, x'>}\cr The Hyperbolic tangent kernel \eqn{k(x, x') = \tanh(scale <x, x'> + offset)}\cr The Laplacian kernel \eqn{k(x,x') = \exp(-\sigma \|x - x'\|)} \cr The Bessel kernel \eqn{k(x,x') = (- Bessel_{(\nu+1)}^n \sigma \|x - x'\|^2)} \cr The ANOVA RBF kernel \eqn{k(x,x') = \sum_{1\leq i_1 \ldots < i_D \leq N} \prod_{d=1}^D k(x_{id}, {x'}_{id})} where k(x,x) is a Gaussian RBF kernel. \cr The Spline kernel \eqn{ \prod_{d=1}^D 1 + x_i x_j + x_i x_j min(x_i, x_j) - \frac{x_i + x_j}{2} min(x_i,x_j)^2 + \frac{min(x_i,x_j)^3}{3}} \\ The String kernels (see \code{stringdot}). 
} \usage{ rbfdot(sigma = 1) polydot(degree = 1, scale = 1, offset = 1) tanhdot(scale = 1, offset = 1) vanilladot() laplacedot(sigma = 1) besseldot(sigma = 1, order = 1, degree = 1) anovadot(sigma = 1, degree = 1) splinedot() } \arguments{ \item{sigma}{The inverse kernel width used by the Gaussian the Laplacian, the Bessel and the ANOVA kernel } \item{degree}{The degree of the polynomial, bessel or ANOVA kernel function. This has to be an positive integer.} \item{scale}{The scaling parameter of the polynomial and tangent kernel is a convenient way of normalizing patterns without the need to modify the data itself} \item{offset}{The offset used in a polynomial or hyperbolic tangent kernel} \item{order}{The order of the Bessel function to be used as a kernel} } \details{ The kernel generating functions are used to initialize a kernel function which calculates the dot (inner) product between two feature vectors in a Hilbert Space. These functions can be passed as a \code{kernel} argument on almost all functions in \pkg{kernlab}(e.g., \code{ksvm}, \code{kpca} etc). Although using one of the existing kernel functions as a \code{kernel} argument in various functions in \pkg{kernlab} has the advantage that optimized code is used to calculate various kernel expressions, any other function implementing a dot product of class \code{kernel} can also be used as a kernel argument. This allows the user to use, test and develop special kernels for a given data set or algorithm. For details on the string kernels see \code{stringdot}. } \value{ Return an S4 object of class \code{kernel} which extents the \code{function} class. The resulting function implements the given kernel calculating the inner (dot) product between two vectors. \item{kpar}{a list containing the kernel parameters (hyperparameters) used.} The kernel parameters can be accessed by the \code{kpar} function. 
} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \note{If the offset in the Polynomial kernel is set to $0$, we obtain homogeneous polynomial kernels, for positive values, we have inhomogeneous kernels. Note that for negative values the kernel does not satisfy Mercer's condition and thus the optimizers may fail. \cr In the Hyperbolic tangent kernel if the offset is negative the likelihood of obtaining a kernel matrix that is not positive definite is much higher (since then even some diagonal elements may be negative), hence if this kernel has to be used, the offset should always be positive. Note, however, that this is no guarantee that the kernel will be positive. } \seealso{\code{stringdot}, \code{\link{kernelMatrix} }, \code{\link{kernelMult}}, \code{\link{kernelPol}}} \examples{ rbfkernel <- rbfdot(sigma = 0.1) rbfkernel kpar(rbfkernel) ## create two vectors x <- rnorm(10) y <- rnorm(10) ## calculate dot product rbfkernel(x,y) } \keyword{symbolmath} kernlab/man/kmmd-class.Rd0000644000175100001440000000415311304023134014775 0ustar hornikusers\name{kmmd-class} \docType{class} \alias{kmmd-class} \alias{kernelf,kmmd-method} \alias{H0,kmmd-method} \alias{AsympH0,kmmd-method} \alias{Radbound,kmmd-method} \alias{Asymbound,kmmd-method} \alias{mmdstats,kmmd-method} \title{Class "kmmd"} \description{The Kernel Maximum Mean Discrepancy object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kmmd", ...)}.
or by calling the \code{kmmd} function } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{xmatrix}:}{Object of class \code{"kernelMatrix"} containing the data used } \item{H0}{Object of class \code{"logical"} contains the value: is H0 rejected (logical)} \item{\code{AsympH0}}{Object of class \code{"logical"} contains the value: is H0 rejected according to the asymptotic bound (logical)} \item{\code{mmdstats}}{Object of class \code{"vector"} contains the test statistics (vector of two)} \item{\code{Radbound}}{Object of class \code{"numeric"} contains the Rademacher bound} \item{\code{Asymbound}}{Object of class \code{"numeric"} contains the asymptotic bound} } } \section{Methods}{ \describe{ \item{kernelf}{\code{signature(object = "kmmd")}: returns the kernel function used} \item{H0}{\code{signature(object = "kmmd")}: returns the value of H0 being rejected} \item{AsympH0}{\code{signature(object = "kmmd")}: returns the value of H0 being rejected according to the asymptotic bound} \item{mmdstats}{\code{signature(object = "kmmd")}: returns the values of the mmd statistics} \item{Radbound}{\code{signature(object = "kmmd")}: returns the value of the Rademacher bound} \item{Asymbound}{\code{signature(object = "kmmd")}: returns the value of the asymptotic bound} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kmmd}} } \examples{ # create data x <- matrix(runif(300),100) y <- matrix(runif(300)+1,100) mmdo <- kmmd(x, y) H0(mmdo) } \keyword{classes} kernlab/man/kha-class.Rd0000644000175100001440000000450312117362716014626 0ustar hornikusers\name{kha-class} \docType{class} \alias{kha-class} \alias{eig,kha-method} \alias{kcall,kha-method} \alias{kernelf,kha-method} \alias{pcv,kha-method} \alias{xmatrix,kha-method} \alias{eskm,kha-method} \title{Class "kha"} \description{ The Kernel Hebbian Algorithm class} \section{Objects of
class "kha"}{ Objects can be created by calls of the form \code{new("kha", ...)}. or by calling the \code{kha} function. } \section{Slots}{ \describe{ \item{\code{pcv}:}{Object of class \code{"matrix"} containing the principal component vectors } \item{\code{eig}:}{Object of class \code{"vector"} containing the corresponding normalization values} \item{\code{eskm}:}{Object of class \code{"vector"} containing the kernel sum} \item{\code{kernelf}:}{Object of class \code{"kfunction"} containing the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters used } \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used } \item{\code{kcall}:}{Object of class \code{"ANY"} containing the function call } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{eig}{\code{signature(object = "kha")}: returns the normalization values } \item{kcall}{\code{signature(object = "kha")}: returns the performed call} \item{kernelf}{\code{signature(object = "kha")}: returns the used kernel function} \item{pcv}{\code{signature(object = "kha")}: returns the principal component vectors } \item{eskm}{\code{signature(object = "kha")}: returns the kernel sum} \item{predict}{\code{signature(object = "kha")}: embeds new data } \item{xmatrix}{\code{signature(object = "kha")}: returns the used data matrix } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kha}}, \code{\link{ksvm-class}}, \code{\link{kcca-class}} } \examples{ # another example using the iris data(iris) test <- sample(1:50,20) kpc <- kha(~.,data=iris[-test,-5], kernel="rbfdot", kpar=list(sigma=0.2),features=2, eta=0.001, maxiter=65) #print the principal component vectors pcv(kpc) kernelf(kpc) eig(kpc) } \keyword{classes} kernlab/man/rvm.Rd0000644000175100001440000001565412117366150013571 0ustar hornikusers\name{rvm} 
\alias{rvm} \alias{rvm-methods} \alias{rvm,formula-method} \alias{rvm,list-method} \alias{rvm,vector-method} \alias{rvm,kernelMatrix-method} \alias{rvm,matrix-method} \alias{show,rvm-method} \alias{predict,rvm-method} \alias{coef,rvm-method} \title{Relevance Vector Machine} \description{ The Relevance Vector Machine is a Bayesian model for regression and classification of identical functional form to the support vector machine. The \code{rvm} function currently supports only regression. } \usage{ \S4method{rvm}{formula}(x, data=NULL, ..., subset, na.action = na.omit) \S4method{rvm}{vector}(x, ...) \S4method{rvm}{matrix}(x, y, type="regression", kernel="rbfdot", kpar="automatic", alpha= ncol(as.matrix(x)), var=0.1, var.fix=FALSE, iterations=100, verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ... , subset, na.action = na.omit) \S4method{rvm}{list}(x, y, type = "regression", kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), alpha = 5, var = 0.1, var.fix = FALSE, iterations = 100, verbosity = 0, tol = .Machine$double.eps, minmaxdiff = 1e-3, cross = 0, fit = TRUE, ..., subset, na.action = na.omit) } \arguments{ \item{x}{a symbolic description of the model to be fit. When not using a formula x can be a matrix or vector containing the training data or a kernel matrix of class \code{kernelMatrix} of the training data or a list of character vectors (for use with the string kernel). Note, that the intercept is always excluded, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `rvm' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression).} \item{type}{\code{rvm} can only be used for regression at the moment.} \item{kernel}{the kernel function used in training and predicting. 
This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. For valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in \code{\link{sigest}} to calculate a good \code{sigma} value for the Gaussian RBF or Laplace kernel, from the data. 
(default = "automatic").} \item{alpha}{The initial alpha vector. Can be either a vector of length equal to the number of data points or a single number.} \item{var}{the initial noise variance} \item{var.fix}{Keep noise variance fixed during iterations (default: FALSE)} \item{iterations}{Number of iterations allowed (default: 100)} \item{tol}{tolerance of termination criterion} \item{minmaxdiff}{termination criteria. Stop when max difference is equal to this parameter (default:1e-3) } \item{verbosity}{print information on algorithm convergence (default = FALSE)} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: TRUE)} \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Mean Squared Error for regression} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{The Relevance Vector Machine typically leads to sparser models than the SVM. It also performs better in many cases (especially in regression). } \value{ An S4 object of class "rvm" containing the fitted model. Accessor functions can be used to access the slots of the object which include : \item{alpha}{The resulting relevance vectors} \item{alphaindex}{ The index of the resulting relevance vectors in the data matrix} \item{nRV}{Number of relevance vectors} \item{RVindex}{The indexes of the relevance vectors} \item{error}{Training error (if \code{fit = TRUE})} ... } \references{ Tipping, M.
E.\cr \emph{Sparse Bayesian learning and the relevance vector machine}\cr Journal of Machine Learning Research 1, 211-244\cr \url{http://www.jmlr.org/papers/volume1/tipping01a/tipping01a.pdf} } \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm}}} \examples{ # create data x <- seq(-20,20,0.1) y <- sin(x)/x + rnorm(401,sd=0.05) # train relevance vector machine foo <- rvm(x, y) foo # print relevance vectors alpha(foo) RVindex(foo) # predict and plot ytest <- predict(foo, x) plot(x, y, type ="l") lines(x, ytest, col="red") } \keyword{regression} \keyword{nonlinear} kernlab/man/spam.Rd0000644000175100001440000000412711304023134013703 0ustar hornikusers\name{spam} \alias{spam} \title{Spam E-mail Database} \description{A data set collected at Hewlett-Packard Labs, that classifies 4601 e-mails as spam or non-spam. In addition to this class label there are 57 variables indicating the frequency of certain words and characters in the e-mail.} \usage{data(spam)} \format{A data frame with 4601 observations and 58 variables. The first 48 variables contain the frequency of the variable name (e.g., business) in the e-mail. If the variable name starts with num (e.g., num650) the it indicates the frequency of the corresponding number (e.g., 650). The variables 49-54 indicate the frequency of the characters `;', `(', `[', `!', `\$', and `\#'. The variables 55-57 contain the average, longest and total run-length of capital letters. Variable 58 indicates the type of the mail and is either \code{"nonspam"} or \code{"spam"}, i.e. unsolicited commercial e-mail.} \details{ The data set contains 2788 e-mails classified as \code{"nonspam"} and 1813 classified as \code{"spam"}. The ``spam'' concept is diverse: advertisements for products/web sites, make money fast schemes, chain letters, pornography... This collection of spam e-mails came from the collectors' postmaster and individuals who had filed spam. 
The collection of non-spam e-mails came from filed work and personal e-mails, and hence the word 'george' and the area code '650' are indicators of non-spam. These are useful when constructing a personalized spam filter. One would either have to blind such non-spam indicators or get a very wide collection of non-spam to generate a general purpose spam filter. } \source{ \itemize{ \item Creators: Mark Hopkins, Erik Reeber, George Forman, Jaap Suermondt at Hewlett-Packard Labs, 1501 Page Mill Rd., Palo Alto, CA 94304 \item Donor: George Forman (gforman at nospam hpl.hp.com) 650-857-7835 } These data have been taken from the UCI Repository Of Machine Learning Databases at \url{http://www.ics.uci.edu/~mlearn/MLRepository.html}} \references{ T. Hastie, R. Tibshirani, J.H. Friedman. \emph{The Elements of Statistical Learning.} Springer, 2001. } \keyword{datasets} kernlab/man/kernelMatrix.Rd0000644000175100001440000001254111304023134015407 0ustar hornikusers\name{kernelMatrix} \alias{kernelMatrix} \alias{kernelMult} \alias{kernelPol} \alias{kernelFast} \alias{kernelPol,kernel-method} \alias{kernelMatrix,kernel-method} \alias{kernelMult,kernel-method} \alias{kernelFast,kernel-method} \alias{kernelMatrix,rbfkernel-method} \alias{kernelMatrix,polykernel-method} \alias{kernelMatrix,vanillakernel-method} \alias{kernelMatrix,tanhkernel-method} \alias{kernelMatrix,laplacekernel-method} \alias{kernelMatrix,anovakernel-method} \alias{kernelMatrix,splinekernel-method} \alias{kernelMatrix,besselkernel-method} \alias{kernelMatrix,stringkernel-method} \alias{kernelMult,rbfkernel,ANY-method} \alias{kernelMult,splinekernel,ANY-method} \alias{kernelMult,polykernel,ANY-method} \alias{kernelMult,tanhkernel,ANY-method} \alias{kernelMult,laplacekernel,ANY-method} \alias{kernelMult,besselkernel,ANY-method} \alias{kernelMult,anovakernel,ANY-method} \alias{kernelMult,vanillakernel,ANY-method} \alias{kernelMult,character,kernelMatrix-method} \alias{kernelMult,stringkernel,ANY-method} 
\alias{kernelPol,rbfkernel-method} \alias{kernelPol,splinekernel-method} \alias{kernelPol,polykernel-method} \alias{kernelPol,tanhkernel-method} \alias{kernelPol,vanillakernel-method} \alias{kernelPol,anovakernel-method} \alias{kernelPol,besselkernel-method} \alias{kernelPol,laplacekernel-method} \alias{kernelPol,stringkernel-method} \alias{kernelFast,rbfkernel-method} \alias{kernelFast,splinekernel-method} \alias{kernelFast,polykernel-method} \alias{kernelFast,tanhkernel-method} \alias{kernelFast,vanillakernel-method} \alias{kernelFast,anovakernel-method} \alias{kernelFast,besselkernel-method} \alias{kernelFast,laplacekernel-method} \alias{kernelFast,stringkernel-method} \alias{kernelFast,splinekernel-method} \title{Kernel Matrix functions} \description{ \code{kernelMatrix} calculates the kernel matrix \eqn{K_{ij} = k(x_i,x_j)} or \eqn{K_{ij} = k(x_i,y_j)}.\cr \code{kernelPol} computes the quadratic kernel expression \eqn{H = z_i z_j k(x_i,x_j)}, \eqn{H = z_i k_j k(x_i,y_j)}.\cr \code{kernelMult} calculates the kernel expansion \eqn{f(x_i) = \sum_{i=1}^m z_i k(x_i,x_j)}\cr \code{kernelFast} computes the kernel matrix, identical to \code{kernelMatrix}, except that it also requires the squared norm of the first argument as additional input, useful in iterative kernel matrix calculations. } \usage{ \S4method{kernelMatrix}{kernel}(kernel, x, y = NULL) \S4method{kernelPol}{kernel}(kernel, x, y = NULL, z, k = NULL) \S4method{kernelMult}{kernel}(kernel, x, y = NULL, z, blocksize = 256) \S4method{kernelFast}{kernel}(kernel, x, y, a) } \arguments{ \item{kernel}{the kernel function to be used to calculate the kernel matrix. This has to be a function of class \code{kernel}, i.e. which can be generated either one of the build in kernel generating functions (e.g., \code{rbfdot} etc.) 
or a user defined function of class \code{kernel} taking two vector arguments and returning a scalar.} \item{x}{a data matrix to be used to calculate the kernel matrix, or a list of vectors when a \code{stringkernel} is used} \item{y}{second data matrix to calculate the kernel matrix, or a list of vectors when a \code{stringkernel} is used} \item{z}{a suitable vector or matrix} \item{k}{a suitable vector or matrix} \item{a}{the squared norm of \code{x}, e.g., \code{rowSums(x^2)}} \item{blocksize}{the kernel expansion computations are done block wise to avoid storing the kernel matrix into memory. \code{blocksize} defines the size of the computational blocks.} } \details{ Common functions used during kernel based computations.\cr The \code{kernel} parameter can be set to any function, of class kernel, which computes the inner product in feature space between two vector arguments. \pkg{kernlab} provides the most popular kernel functions which can be initialized by using the following functions: \itemize{ \item \code{rbfdot} Radial Basis kernel function \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} the Spline kernel } (see example.) \code{kernelFast} is mainly used in situations where columns of the kernel matrix are computed per invocation. In these cases, evaluating the norm of each row-entry over and over again would cause significant computational overhead. } \value{ \code{kernelMatrix} returns a symmetric positive semi-definite matrix.\cr \code{kernelPol} returns a matrix.\cr \code{kernelMult} usually returns a one-column matrix.
} \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{rbfdot}}, \code{\link{polydot}}, \code{\link{tanhdot}}, \code{\link{vanilladot}}} \examples{ ## use the spam data data(spam) dt <- as.matrix(spam[c(10:20,3000:3010),-58]) ## initialize kernel function rbf <- rbfdot(sigma = 0.05) rbf ## calculate kernel matrix kernelMatrix(rbf, dt) yt <- as.matrix(as.integer(spam[c(10:20,3000:3010),58])) yt[yt==2] <- -1 ## calculate the quadratic kernel expression kernelPol(rbf, dt, ,yt) ## calculate the kernel expansion kernelMult(rbf, dt, ,yt) } \keyword{algebra} \keyword{array} kernlab/man/gausspr-class.Rd0000644000175100001440000001041212055335061015535 0ustar hornikusers\name{gausspr-class} \docType{class} \alias{gausspr-class} \alias{alpha,gausspr-method} \alias{cross,gausspr-method} \alias{error,gausspr-method} \alias{kcall,gausspr-method} \alias{kernelf,gausspr-method} \alias{kpar,gausspr-method} \alias{lev,gausspr-method} \alias{type,gausspr-method} \alias{alphaindex,gausspr-method} \alias{xmatrix,gausspr-method} \alias{ymatrix,gausspr-method} \alias{scaling,gausspr-method} \title{Class "gausspr"} \description{The Gaussian Processes object class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("gausspr", ...)}. 
or by calling the \code{gausspr} function } \section{Slots}{ \describe{ \item{\code{tol}:}{Object of class \code{"numeric"} contains tolerance of termination criteria} \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} contains the kernel parameter used } \item{\code{kcall}:}{Object of class \code{"list"} contains the used function call } \item{\code{type}:}{Object of class \code{"character"} contains type of problem } \item{\code{terms}:}{Object of class \code{"ANY"} contains the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"input"} containing the data matrix used } \item{\code{ymatrix}:}{Object of class \code{"output"} containing the response matrix} \item{\code{fitted}:}{Object of class \code{"output"} containing the fitted values } \item{\code{lev}:}{Object of class \code{"vector"} containing the levels of the response (in case of classification) } \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in case of classification) } \item{\code{alpha}:}{Object of class \code{"listI"} containing the computes alpha values } \item{\code{alphaindex}}{Object of class \code{"list"} containing the indexes for the alphas in various classes (in multi-class problems).} \item{\code{sol}}{Object of class \code{"matrix"} containing the solution to the Gaussian Process formulation, it is used to compute the variance in regression problems.} \item{\code{scaling}}{Object of class \code{"ANY"} containing the scaling coefficients of the data (when case \code{scaled = TRUE} is used).} \item{\code{nvar}:}{Object of class \code{"numeric"} containing the computed variance} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross validation error} \item{\code{n.action}:}{Object of class 
\code{"ANY"} containing the action performed in NA } } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "gausspr")}: returns the alpha vector} \item{cross}{\code{signature(object = "gausspr")}: returns the cross validation error } \item{error}{\code{signature(object = "gausspr")}: returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values } \item{kcall}{\code{signature(object = "gausspr")}: returns the call performed} \item{kernelf}{\code{signature(object = "gausspr")}: returns the kernel function used} \item{kpar}{\code{signature(object = "gausspr")}: returns the kernel parameter used} \item{lev}{\code{signature(object = "gausspr")}: returns the response levels (in classification) } \item{type}{\code{signature(object = "gausspr")}: returns the type of problem} \item{xmatrix}{\code{signature(object = "gausspr")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "gausspr")}: returns the response matrix used} \item{scaling}{\code{signature(object = "gausspr")}: returns the scaling coefficients of the data (when \code{scaled = TRUE} is used)} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{gausspr}}, \code{\link{ksvm-class}}, \code{\link{vm-class}} } \examples{ # train model data(iris) test <- gausspr(Species~.,data=iris,var=2) test alpha(test) error(test) lev(test) } \keyword{classes} kernlab/man/onlearn-class.Rd0000644000175100001440000000672412117365114015523 0ustar hornikusers\name{onlearn-class} \docType{class} \alias{onlearn-class} \alias{alpha,onlearn-method} \alias{b,onlearn-method} \alias{buffer,onlearn-method} \alias{fit,onlearn-method} \alias{kernelf,onlearn-method} \alias{kpar,onlearn-method} \alias{predict,onlearn-method} \alias{rho,onlearn-method} \alias{rho} \alias{show,onlearn-method} \alias{type,onlearn-method} \alias{xmatrix,onlearn-method} \alias{buffer} \title{Class "onlearn"} \description{ The class of 
objects used by the Kernel-based Online learning algorithms} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("onlearn", ...)}. or by calls to the function \code{inlearn}. } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"function"} containing the used kernel function} \item{\code{buffer}:}{Object of class \code{"numeric"} containing the size of the buffer} \item{\code{kpar}:}{Object of class \code{"list"} containing the hyperparameters of the kernel function.} \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data points (similar to support vectors) } \item{\code{fit}:}{Object of class \code{"numeric"} containing the decision function value of the last data point} \item{\code{onstart}:}{Object of class \code{"numeric"} used for indexing } \item{\code{onstop}:}{Object of class \code{"numeric"} used for indexing} \item{\code{alpha}:}{Object of class \code{"ANY"} containing the model parameters} \item{\code{rho}:}{Object of class \code{"numeric"} containing model parameter} \item{\code{b}:}{Object of class \code{"numeric"} containing the offset} \item{\code{pattern}:}{Object of class \code{"factor"} used for dealing with factors} \item{\code{type}:}{Object of class \code{"character"} containing the problem type (classification, regression, or novelty } } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "onlearn")}: returns the model parameters} \item{b}{\code{signature(object = "onlearn")}: returns the offset } \item{buffer}{\code{signature(object = "onlearn")}: returns the buffer size} \item{fit}{\code{signature(object = "onlearn")}: returns the last decision function value} \item{kernelf}{\code{signature(object = "onlearn")}: return the kernel function used} \item{kpar}{\code{signature(object = "onlearn")}: returns the hyper-parameters used} \item{onlearn}{\code{signature(obj = "onlearn")}: the learning function} \item{predict}{\code{signature(object = 
"onlearn")}: the predict function} \item{rho}{\code{signature(object = "onlearn")}: returns model parameter} \item{show}{\code{signature(object = "onlearn")}: show function} \item{type}{\code{signature(object = "onlearn")}: returns the type of problem} \item{xmatrix}{\code{signature(object = "onlearn")}: returns the stored data points} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{onlearn}}, \code{\link{inlearn}} } \examples{ ## create toy data set x <- rbind(matrix(rnorm(100),,2),matrix(rnorm(100)+3,,2)) y <- matrix(c(rep(1,50),rep(-1,50)),,1) ## initialize onlearn object on <- inlearn(2,kernel="rbfdot",kpar=list(sigma=0.2), type="classification") ## learn one data point at the time for(i in sample(1:100,100)) on <- onlearn(on,x[i,],y[i],nu=0.03,lambda=0.1) sign(predict(on,x)) } \keyword{classes} kernlab/man/ranking-class.Rd0000644000175100001440000000261612117365252015515 0ustar hornikusers\name{ranking-class} \docType{class} \alias{ranking-class} \alias{edgegraph} \alias{convergence} \alias{convergence,ranking-method} \alias{edgegraph,ranking-method} \alias{show,ranking-method} \title{Class "ranking"} \description{Object of the class \code{"ranking"} are created from the \code{ranking} function and extend the class \code{matrix}} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("ranking", ...)}. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"matrix"} containing the data ranking and scores} \item{\code{convergence}:}{Object of class \code{"matrix"} containing the convergence matrix} \item{\code{edgegraph}:}{Object of class \code{"matrix"} containing the edgegraph} } } \section{Extends}{ Class \code{"matrix"}, directly. 
} \section{Methods}{ \describe{ \item{show}{\code{signature(object = "ranking")}: displays the ranking score matrix} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{ \code{\link{ranking}} } \examples{ data(spirals) ## create data set to be ranked ran<-spirals[rowSums(abs(spirals)<0.55)==2,] ## rank points according to "relevance" to point 54 (up left) ranked<-ranking(ran,54,kernel="rbfdot", kpar=list(sigma=100),edgegraph=TRUE) ranked edgegraph(ranked)[1:10,1:10] } \keyword{classes} kernlab/man/kernel-class.Rd0000644000175100001440000000422311304023134015323 0ustar hornikusers\name{kernel-class} \docType{class} \alias{rbfkernel-class} \alias{polykernel-class} \alias{vanillakernel-class} \alias{tanhkernel-class} \alias{anovakernel-class} \alias{besselkernel-class} \alias{laplacekernel-class} \alias{splinekernel-class} \alias{stringkernel-class} \alias{fourierkernel-class} \alias{kfunction-class} \alias{kernel-class} \alias{kpar,kernel-method} \title{Class "kernel" "rbfkernel" "polykernel", "tanhkernel", "vanillakernel"} \description{ The built-in kernel classes in \pkg{kernlab}} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("rbfkernel")}, \code{new{"polykernel"}}, \code{new{"tanhkernel"}}, \code{new{"vanillakernel"}}, \code{new{"anovakernel"}}, \code{new{"besselkernel"}}, \code{new{"laplacekernel"}}, \code{new{"splinekernel"}}, \code{new{"stringkernel"}} or by calling the \code{rbfdot}, \code{polydot}, \code{tanhdot}, \code{vanilladot}, \code{anovadot}, \code{besseldot}, \code{laplacedot}, \code{splinedot}, \code{stringdot} functions etc.. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"function"} containing the kernel function } \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters } } } \section{Extends}{ Class \code{"kernel"}, directly. Class \code{"function"}, by class \code{"kernel"}. 
} \section{Methods}{ \describe{ \item{kernelMatrix}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the kernel matrix} \item{kernelMult}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the kernel expansion} \item{kernelPol}{\code{signature(kernel = "rbfkernel", x = "matrix")}: computes the quadratic kernel expression} \item{kernelFast}{\code{signature(kernel = "rbfkernel", x = "matrix"),,a}: computes parts or the full kernel matrix, mainly used in kernel algorithms where columns of the kernel matrix are computed per invocation } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{ \code{\link{dots}} } \examples{ rbfkernel <- rbfdot(sigma = 0.1) rbfkernel is(rbfkernel) kpar(rbfkernel) } \keyword{classes} kernlab/man/lssvm-class.Rd0000644000175100001440000001040611304023134015210 0ustar hornikusers\name{lssvm-class} \docType{class} \alias{lssvm-class} \alias{alpha,lssvm-method} \alias{b,lssvm-method} \alias{cross,lssvm-method} \alias{error,lssvm-method} \alias{kcall,lssvm-method} \alias{kernelf,lssvm-method} \alias{kpar,lssvm-method} \alias{param,lssvm-method} \alias{lev,lssvm-method} \alias{type,lssvm-method} \alias{alphaindex,lssvm-method} \alias{xmatrix,lssvm-method} \alias{ymatrix,lssvm-method} \alias{scaling,lssvm-method} \alias{nSV,lssvm-method} \title{Class "lssvm"} \description{The Least Squares SVM object } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("lssvm", ...)}.
or by calling the \code{lssvm} function } \section{Slots}{ \describe{ \item{\code{kernelf}:}{Object of class \code{"kfunction"} contains the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} contains the kernel parameter used } \item{\code{param}:}{Object of class \code{"list"} contains the regularization parameter used.} \item{\code{kcall}:}{Object of class \code{"call"} contains the used function call } \item{\code{type}:}{Object of class \code{"character"} contains type of problem } \item{\code{coef}:}{Object of class \code{"ANY"} contains the model parameter } \item{\code{terms}:}{Object of class \code{"ANY"} contains the terms representation of the symbolic model used (when using a formula)} \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used } \item{\code{ymatrix}:}{Object of class \code{"output"} containing the response matrix} \item{\code{fitted}:}{Object of class \code{"output"} containing the fitted values } \item{\code{b}:}{Object of class \code{"numeric"} containing the offset } \item{\code{lev}:}{Object of class \code{"vector"} containing the levels of the response (in case of classification) } \item{\code{scaling}:}{Object of class \code{"ANY"} containing the scaling information performed on the data} \item{\code{nclass}:}{Object of class \code{"numeric"} containing the number of classes (in case of classification) } \item{\code{alpha}:}{Object of class \code{"listI"} containing the computes alpha values } \item{\code{alphaindex}}{Object of class \code{"list"} containing the indexes for the alphas in various classes (in multi-class problems).} \item{\code{error}:}{Object of class \code{"numeric"} containing the training error} \item{\code{cross}:}{Object of class \code{"numeric"} containing the cross validation error} \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed in NA } \item{\code{nSV}:}{Object of class \code{"numeric"} containing the number of model 
parameters } } } \section{Methods}{ \describe{ \item{alpha}{\code{signature(object = "lssvm")}: returns the alpha vector} \item{cross}{\code{signature(object = "lssvm")}: returns the cross validation error } \item{error}{\code{signature(object = "lssvm")}: returns the training error } \item{fitted}{\code{signature(object = "vm")}: returns the fitted values } \item{kcall}{\code{signature(object = "lssvm")}: returns the call performed} \item{kernelf}{\code{signature(object = "lssvm")}: returns the kernel function used} \item{kpar}{\code{signature(object = "lssvm")}: returns the kernel parameter used} \item{param}{\code{signature(object = "lssvm")}: returns the regularization parameter used} \item{lev}{\code{signature(object = "lssvm")}: returns the response levels (in classification) } \item{type}{\code{signature(object = "lssvm")}: returns the type of problem} \item{scaling}{\code{signature(object = "lssvm")}: returns the scaling values } \item{xmatrix}{\code{signature(object = "lssvm")}: returns the data matrix used} \item{ymatrix}{\code{signature(object = "lssvm")}: returns the response matrix used} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{lssvm}}, \code{\link{ksvm-class}} } \examples{ # train model data(iris) test <- lssvm(Species~.,data=iris,var=2) test alpha(test) error(test) lev(test) } \keyword{classes} kernlab/man/kfa.Rd0000644000175100001440000001115012117362655013517 0ustar hornikusers\name{kfa} \alias{kfa} \alias{kfa,formula-method} \alias{kfa,matrix-method} \alias{show,kfa-method} \alias{coef,kfa-method} \title{Kernel Feature Analysis} \description{ The Kernel Feature Analysis algorithm is an algorithm for extracting structure from possibly high-dimensional data sets. Similar to \code{kpca} a new basis for the data is found. The data can then be projected on the new basis. } \usage{ \S4method{kfa}{formula}(x, data = NULL, na.action = na.omit, ...)
\S4method{kfa}{matrix}(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 0, subset = 59, normalize = TRUE, na.action = na.omit) } \arguments{ \item{x}{ The data matrix indexed by row or a formula describing the model. Note, that an intercept is always included, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model (when using a formula).} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes an inner product in feature space between two vector arguments. \pkg{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". 
} Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{features}{Number of features (principal components) to return. (default: 0 , all)} \item{subset}{the number of features sampled (used) from the data set} \item{normalize}{normalize the feature selected (default: TRUE)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{ Kernel Feature analysis is similar to Kernel PCA, but instead of extracting eigenvectors of the training dataset in feature space, it approximates the eigenvectors by selecting training patterns which are good basis vectors for the training set. It works by choosing a fixed size subset of the data set and scaling it to unit length (under the kernel). It then chooses the features that maximize the value of the inner product (kernel function) with the rest of the patterns. } \value{ \code{kfa} returns an object of class \code{kfa} containing the features selected by the algorithm. \item{xmatrix}{contains the features selected} \item{alpha}{contains the sparse alpha vector} The \code{predict} function can be used to embed new data points into to the selected feature base. } \references{Alex J. Smola, Olvi L. 
Mangasarian and Bernhard Schoelkopf\cr \emph{Sparse Kernel Feature Analysis}\cr Data Mining Institute Technical Report 99-04, October 1999\cr \url{ftp://ftp.cs.wisc.edu/pub/dmi/tech-reports/99-04.ps} } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kpca}}, \code{\link{kfa-class}}} \examples{ data(promotergene) f <- kfa(~.,data=promotergene,features=2,kernel="rbfdot", kpar=list(sigma=0.01)) plot(predict(f,promotergene),col=as.numeric(promotergene[,1])) } \keyword{cluster} kernlab/man/ksvm.Rd0000644000175100001440000003660712560414652013751 0ustar hornikusers\name{ksvm} \alias{ksvm} \alias{ksvm,formula-method} \alias{ksvm,vector-method} \alias{ksvm,matrix-method} \alias{ksvm,kernelMatrix-method} \alias{ksvm,list-method} \alias{show,ksvm-method} \alias{coef,ksvm-method} \title{Support Vector Machines} \description{ Support Vector Machines are an excellent tool for classification, novelty detection, and regression. \code{ksvm} supports the well known C-svc, nu-svc, (classification) one-class-svc (novelty) eps-svr, nu-svr (regression) formulations along with native multi-class classification formulations and the bound-constraint SVM formulations.\cr \code{ksvm} also supports class-probabilities output and confidence intervals for regression. } \usage{ \S4method{ksvm}{formula}(x, data = NULL, ..., subset, na.action = na.omit, scaled = TRUE) \S4method{ksvm}{vector}(x, ...) \S4method{ksvm}{matrix}(x, y = NULL, scaled = TRUE, type = NULL, kernel ="rbfdot", kpar = "automatic", C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ..., subset, na.action = na.omit) \S4method{ksvm}{kernelMatrix}(x, y = NULL, type = NULL, C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ...) 
\S4method{ksvm}{list}(x, y = NULL, type = NULL, kernel = "stringdot", kpar = list(length = 4, lambda = 0.5), C = 1, nu = 0.2, epsilon = 0.1, prob.model = FALSE, class.weights = NULL, cross = 0, fit = TRUE, cache = 40, tol = 0.001, shrinking = TRUE, ..., na.action = na.omit) } \arguments{ \item{x}{a symbolic description of the model to be fit. When not using a formula x can be a matrix or vector containing the training data or a kernel matrix of class \code{kernelMatrix} of the training data or a list of character vectors (for use with the string kernel). Note, that the intercept is always excluded, whether given in the formula or not.} \item{data}{an optional data frame containing the training data, when using a formula. By default the data is taken from the environment which `ksvm' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression).} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally (both \code{x} and \code{y} variables) to zero mean and unit variance. The center and scale values are returned and used for later predictions.} \item{type}{\code{ksvm} can be used for classification , for regression, or for novelty detection. 
Depending on whether \code{y} is a factor or not, the default setting for \code{type} is \code{C-svc} or \code{eps-svr}, respectively, but can be overwritten by setting an explicit value.\cr Valid options are: \itemize{ \item \code{C-svc} C classification \item \code{nu-svc} nu classification \item \code{C-bsvc} bound-constraint svm classification \item \code{spoc-svc} Crammer, Singer native multi-class \item \code{kbb-svc} Weston, Watkins native multi-class \item \code{one-svc} novelty detection \item \code{eps-svr} epsilon regression \item \code{nu-svr} nu regression \item \code{eps-bsvr} bound-constraint svm regression } } \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes the inner product in feature space between two vector arguments (see \code{\link{kernels}}). \cr kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } Setting the kernel parameter to "matrix" treats \code{x} as a kernel matrix calling the \code{kernelMatrix} interface.\cr The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. For valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". 
\item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. In the case of a Radial Basis kernel function (Gaussian) kpar can also be set to the string "automatic" which uses the heuristics in \code{\link{sigest}} to calculate a good \code{sigma} value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic").} \item{C}{cost of constraints violation (default: 1) this is the `C'-constant of the regularization term in the Lagrange formulation.} \item{nu}{parameter needed for \code{nu-svc}, \code{one-svc}, and \code{nu-svr}. The \code{nu} parameter sets the upper bound on the training error and the lower bound on the fraction of data points to become Support Vectors (default: 0.2).} \item{epsilon}{epsilon in the insensitive-loss function used for \code{eps-svr}, \code{nu-svr} and \code{eps-bsvm} (default: 0.1)} \item{prob.model}{if set to \code{TRUE} builds a model for calculating class probabilities or in case of regression, calculates the scaling parameter of the Laplacian distribution fitted on the residuals. Fitting is done on output data created by performing a 3-fold cross-validation on the training data. For details see references. (default: \code{FALSE})} \item{class.weights}{a named vector of weights for the different classes, used for asymmetric class sizes. Not all factor levels have to be supplied (default weight: 1). 
All components have to be named.} \item{cache}{cache memory in MB (default 40)} \item{tol}{tolerance of termination criterion (default: 0.001)} \item{shrinking}{option whether to use the shrinking-heuristics (default: \code{TRUE})} \item{cross}{if a integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the accuracy rate for classification and the Mean Squared Error for regression} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: \code{TRUE})} \item{\dots}{additional parameters for the low level fitting function} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} } \value{ An S4 object of class \code{"ksvm"} containing the fitted model, Accessor functions can be used to access the slots of the object (see examples) which include: \item{alpha}{The resulting support vectors, (alpha vector) (possibly scaled).} \item{alphaindex}{The index of the resulting support vectors in the data matrix. Note that this index refers to the pre-processed data (after the possible effect of \code{na.omit} and \code{subset})} \item{coef}{The corresponding coefficients times the training labels.} \item{b}{The negative intercept.} \item{nSV}{The number of Support Vectors} \item{obj}{The value of the objective function. 
In case of one-against-one classification this is a vector of values} \item{error}{Training error} \item{cross}{Cross validation error, (when cross > 0)} \item{prob.model}{Contains the width of the Laplacian fitted on the residuals in case of regression, or the parameters of the sigmoid fitted on the decision values in case of classification.} } \details{ \code{ksvm} uses John Platt's SMO algorithm for solving the SVM QP problem an most SVM formulations. On the \code{spoc-svc}, \code{kbb-svc}, \code{C-bsvc} and \code{eps-bsvr} formulations a chunking algorithm based on the TRON QP solver is used. \cr For multiclass-classification with \eqn{k} classes, \eqn{k > 2}, \code{ksvm} uses the `one-against-one'-approach, in which \eqn{k(k-1)/2} binary classifiers are trained; the appropriate class is found by a voting scheme, The \code{spoc-svc} and the \code{kbb-svc} formulations deal with the multiclass-classification problems by solving a single quadratic problem involving all the classes.\cr If the predictor variables include factors, the formula interface must be used to get a correct model matrix. \cr In classification when \code{prob.model} is \code{TRUE} a 3-fold cross validation is performed on the data and a sigmoid function is fitted on the resulting decision values \eqn{f}. The data can be passed to the \code{ksvm} function in a \code{matrix} or a \code{data.frame}, in addition \code{ksvm} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used.\cr The \code{plot} function for binary classification \code{ksvm} objects displays a contour plot of the decision values with the corresponding support vectors highlighted.\cr The predict function can return class probabilities for classification problems by setting the \code{type} parameter to "probabilities". 
\cr The problem of model selection is partially addressed by an empirical observation for the RBF kernels (Gaussian, Laplace) where the optimal values of the \eqn{sigma} width parameter are shown to lie in between the 0.1 and 0.9 quantile of the \eqn{\|x- x'\|} statistics. When using an RBF kernel and setting \code{kpar} to "automatic", \code{ksvm} uses the \code{sigest} function to estimate the quantiles and uses the median of the values. } \note{Data is scaled internally by default, usually yielding better results.} \references{ \itemize{ \item Chang Chih-Chung, Lin Chih-Jen\cr \emph{LIBSVM: a library for Support Vector Machines}\cr \url{http://www.csie.ntu.edu.tw/~cjlin/libsvm} \item Chih-Wei Hsu, Chih-Jen Lin\cr \emph{BSVM} \url{http://www.csie.ntu.edu.tw/~cjlin/bsvm/} \item J. Platt\cr \emph{Probabilistic outputs for support vector machines and comparison to regularized likelihood methods} \cr Advances in Large Margin Classifiers, A. Smola, P. Bartlett, B. Schoelkopf and D. Schuurmans, Eds. Cambridge, MA: MIT Press, 2000.\cr \url{http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.1639} \item H.-T. Lin, C.-J. Lin and R. C. Weng\cr \emph{A note on Platt's probabilistic outputs for support vector machines}\cr \url{http://www.csie.ntu.edu.tw/~htlin/paper/doc/plattprob.pdf} \item C.-W. Hsu and C.-J. Lin \cr \emph{A comparison on methods for multi-class support vector machines}\cr IEEE Transactions on Neural Networks, 13(2002) 415-425.\cr \url{http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.ps.gz} \item K. Crammer, Y. Singer\cr \emph{On the learnability and design of output codes for multiclass problems}\cr Computational Learning Theory, 35-46, 2000.\cr \url{http://webee.technion.ac.il/people/koby/publications/ecoc-mlj02.pdf} \item J. Weston, C. Watkins\cr \emph{Multi-class support vector machines} \cr In M.
Verleysen, Proceedings of ESANN99 Brussels, 1999\cr \url{http://citeseer.ist.psu.edu/8884.html} } } \author{ Alexandros Karatzoglou (SMO optimizers in C++ by Chih-Chung Chang & Chih-Jen Lin)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{\code{\link{predict.ksvm}}, \code{\link{ksvm-class}}, \code{\link{couple}} } \keyword{methods} \keyword{regression} \keyword{nonlinear} \keyword{classif} \keyword{neural} \examples{ ## simple example using the spam data set data(spam) ## create test and training set index <- sample(1:dim(spam)[1]) spamtrain <- spam[index[1:floor(dim(spam)[1]/2)], ] spamtest <- spam[index[((ceiling(dim(spam)[1]/2)) + 1):dim(spam)[1]], ] ## train a support vector machine filter <- ksvm(type~.,data=spamtrain,kernel="rbfdot", kpar=list(sigma=0.05),C=5,cross=3) filter ## predict mail type on the test set mailtype <- predict(filter,spamtest[,-58]) ## Check results table(mailtype,spamtest[,58]) ## Another example with the famous iris data data(iris) ## Create a kernel function using the build in rbfdot function rbf <- rbfdot(sigma=0.1) rbf ## train a bound constraint support vector machine irismodel <- ksvm(Species~.,data=iris,type="C-bsvc", kernel=rbf,C=10,prob.model=TRUE) irismodel ## get fitted values fitted(irismodel) ## Test on the training set with probabilities as output predict(irismodel, iris[,-5], type="probabilities") ## Demo of the plot function x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) ### Use kernelMatrix K <- as.kernelMatrix(crossprod(t(x))) svp2 <- ksvm(K, y, type="C-svc") svp2 # test data xtest <- rbind(matrix(rnorm(20),,2),matrix(rnorm(20,mean=3),,2)) # test kernel matrix i.e. 
inner/kernel product of test data with # Support Vectors Ktest <- as.kernelMatrix(crossprod(t(xtest),t(x[SVindex(svp2), ]))) predict(svp2, Ktest) #### Use custom kernel k <- function(x,y) {(sum(x*y) +1)*exp(-0.001*sum((x-y)^2))} class(k) <- "kernel" data(promotergene) ## train svm using custom kernel gene <- ksvm(Class~.,data=promotergene[c(1:20, 80:100),],kernel=k, C=5,cross=5) gene #### Use text with string kernels data(reuters) is(reuters) tsv <- ksvm(reuters,rlabels,kernel="stringdot", kpar=list(length=5),cross=3,C=10) tsv ## regression # create data x <- seq(-20,20,0.1) y <- sin(x)/x + rnorm(401,sd=0.03) # train support vector machine regm <- ksvm(x,y,epsilon=0.01,kpar=list(sigma=16),cross=3) plot(x,y,type="l") lines(x,predict(regm,x),col="red") } kernlab/man/inchol.Rd0000644000175100001440000001025011304023134014211 0ustar hornikusers\name{inchol} \alias{inchol} \alias{inchol,matrix-method} %- Also NEED an '\alias' for EACH other topic documented here. \title{Incomplete Cholesky decomposition} \description{ \code{inchol} computes the incomplete Cholesky decomposition of the kernel matrix from a data matrix. } \usage{ inchol(x, kernel="rbfdot", kpar=list(sigma=0.1), tol = 0.001, maxiter = dim(x)[1], blocksize = 50, verbose = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{The data matrix indexed by row} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class \code{kernel}, which computes the inner product in feature space between two vector arguments. 
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well. } \item{tol}{algorithm stops when remaining pivots bring less accuracy then \code{tol} (default: 0.001)} \item{maxiter}{maximum number of iterations and columns in \eqn{Z}} \item{blocksize}{add this many columns to matrix per iteration} \item{verbose}{print info on algorithm convergence} } \details{An incomplete cholesky decomposition calculates \eqn{Z} where \eqn{K= ZZ'} \eqn{K} being the kernel matrix. Since the rank of a kernel matrix is usually low, \eqn{Z} tends to be smaller then the complete kernel matrix. 
The decomposed matrix can be used to create memory efficient kernel-based algorithms without the need to compute and store a complete kernel matrix in memory.} \value{ An S4 object of class "inchol" which is an extension of the class "matrix". The object is the decomposed kernel matrix along with the slots : \item{pivots}{Indices on which pivots where done} \item{diagresidues}{Residuals left on the diagonal} \item{maxresiduals}{Residuals picked for pivoting} slots can be accessed either by \code{object@slot} or by accessor functions with the same name (e.g., \code{pivots(object))}} \references{ Francis R. Bach, Michael I. Jordan\cr \emph{Kernel Independent Component Analysis}\cr Journal of Machine Learning Research 3, 1-48\cr \url{http://www.jmlr.org/papers/volume3/bach02a/bach02a.pdf} } \author{Alexandros Karatzoglou (based on Matlab code by S.V.N. (Vishy) Vishwanathan and Alex Smola)\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{csi}}, \code{\link{inchol-class}}, \code{\link{chol}}} \examples{ data(iris) datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- inchol(datamatrix,kernel=rbf) dim(Z) pivots(Z) # calculate kernel matrix K <- crossprod(t(Z)) # difference between approximated and real kernel matrix (K - kernelMatrix(kernel=rbf, datamatrix))[6,] } \keyword{methods} \keyword{algebra} \keyword{array} kernlab/man/kha.Rd0000644000175100001440000001160412117362753013524 0ustar hornikusers\name{kha} \alias{kha} \alias{kha,formula-method} \alias{kha,matrix-method} \alias{predict,kha-method} \encoding{latin1} \title{Kernel Principal Components Analysis} \description{ Kernel Hebbian Algorithm is a nonlinear iterative algorithm for principal component analysis.} \usage{ \S4method{kha}{formula}(x, data = NULL, na.action, ...) \S4method{kha}{matrix}(x, kernel = "rbfdot", kpar = list(sigma = 0.1), features = 5, eta = 0.005, th = 1e-4, maxiter = 10000, verbose = FALSE, na.action = na.omit, ...) 
} \arguments{ \item{x}{ The data matrix indexed by row or a formula describing the model. Note, that an intercept is always included, whether given in the formula or not.} \item{data}{an optional data frame containing the variables in the model (when using a formula).} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes the inner product in feature space between two vector arguments (see \code{\link{kernels}}). \pkg{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{features}{Number of features (principal components) to return. 
(default: 5)} \item{eta}{The hebbian learning rate (default : 0.005)} \item{th}{the smallest value of the convergence step (default : 0.0001) } \item{maxiter}{the maximum number of iterations.} \item{verbose}{print convergence every 100 iterations. (default : FALSE)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{The original form of KPCA can only be used on small data sets since it requires the estimation of the eigenvectors of a full kernel matrix. The Kernel Hebbian Algorithm iteratively estimates the Kernel Principal Components with only linear order memory complexity. (see ref. for more details) } \value{ An S4 object containing the principal component vectors along with the corresponding normalization values. \item{pcv}{a matrix containing the principal component vectors (column wise)} \item{eig}{The normalization values} \item{xmatrix}{The original data matrix} all the slots of the object can be accessed by accessor functions. } \note{The predict function can be used to embed new data on the new space} \references{Kwang In Kim, M.O. Franz and B. 
Schölkopf\cr \emph{Kernel Hebbian Algorithm for Iterative Kernel Principal Component Analysis}\cr Max-Planck-Institut für biologische Kybernetik, Tübingen (109)\cr \url{http://www.kyb.tuebingen.mpg.de/publications/pdfs/pdf2302.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{kpca}}, \code{\link{kfa}}, \code{\link{kcca}}, \code{pca}} \examples{ # another example using the iris data(iris) test <- sample(1:150,70) kpc <- kha(~.,data=iris[-test,-5],kernel="rbfdot", kpar=list(sigma=0.2),features=2, eta=0.001, maxiter=65) #print the principal component vectors pcv(kpc) #plot the data projection on the components plot(predict(kpc,iris[,-5]),col=as.integer(iris[,5]), xlab="1st Principal Component",ylab="2nd Principal Component") } \keyword{cluster} kernlab/man/kkmeans.Rd0000644000175100001440000001346212560414652014414 0ustar hornikusers\name{kkmeans} \alias{kkmeans} \alias{kkmeans,matrix-method} \alias{kkmeans,formula-method} \alias{kkmeans,list-method} \alias{kkmeans,kernelMatrix-method} \title{Kernel k-means} \description{ A weighted kernel version of the famous k-means algorithm. } \usage{ \S4method{kkmeans}{formula}(x, data = NULL, na.action = na.omit, ...) \S4method{kkmeans}{matrix}(x, centers, kernel = "rbfdot", kpar = "automatic", alg="kkmeans", p=1, na.action = na.omit, ...) \S4method{kkmeans}{kernelMatrix}(x, centers, ...) \S4method{kkmeans}{list}(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), alg ="kkmeans", p = 1, na.action = na.omit, ...) } \arguments{ \item{x}{the matrix of data to be clustered, or a symbolic description of the model to be fit, or a kernel Matrix of class \code{kernelMatrix}, or a list of character vectors.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `kkmeans' is called from.} \item{centers}{Either the number of clusters or a matrix of initial cluster centers.
If the first, a random initial partitioning is used.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes an inner product in feature space between two vector arguments (see \code{\link{kernels}}). \pkg{kernlab} provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } Setting the kernel parameter to "matrix" treats \code{x} as a kernel matrix calling the \code{kernelMatrix} interface.\cr The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{a character string or the list of hyper-parameters (kernel parameters). The default character string \code{"automatic"} uses a heuristic to determine a suitable value for the width parameter of the RBF kernel.\cr A list can also be used containing the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot".
\item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{alg}{the algorithm to use. Options currently include \code{kkmeans} and \code{kerninghan}. } \item{p}{a parameter used to keep the affinity matrix positive semidefinite} \item{na.action}{The action to perform on NA} \item{\dots}{additional parameters} } \details{ \code{kernel k-means} uses the 'kernel trick' (i.e. implicitly projecting all data into a non-linear feature space with the use of a kernel) in order to deal with one of the major drawbacks of \code{k-means} that is that it cannot capture clusters that are not linearly separable in input space. \cr The algorithm is implemented using the triangle inequality to avoid unnecessary and computationally expensive distance calculations. This leads to significant speedup particularly on large data sets with a high number of clusters. \cr With a particular choice of weights this algorithm becomes equivalent to Kernighan-Lin, and the norm-cut graph partitioning algorithms. \cr The function also supports input in the form of a kernel matrix or a list of characters for text clustering.\cr The data can be passed to the \code{kkmeans} function in a \code{matrix} or a \code{data.frame}, in addition \code{kkmeans} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used. } \value{ An S4 object of class \code{specc} which extends the class \code{vector} containing integers indicating the cluster to which each point is allocated.
The following slots contain useful information \item{centers}{A matrix of cluster centers.} \item{size}{The number of points in each cluster} \item{withinss}{The within-cluster sum of squares for each cluster} \item{kernelf}{The kernel function used} } \references{ Inderjit Dhillon, Yuqiang Guan, Brian Kulis\cr A Unified view of Kernel k-means, Spectral Clustering and Graph Partitioning\cr UTCS Technical Report\cr \url{http://web.cse.ohio-state.edu/~kulis/pubs/spectral_techreport.pdf} } \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{specc}}, \code{\link{kpca}}, \code{\link{kcca}} } \examples{ ## Cluster the iris data set. data(iris) sc <- kkmeans(as.matrix(iris[,-5]), centers=3) sc centers(sc) size(sc) withinss(sc) } \keyword{cluster} kernlab/man/kpca-class.Rd0000644000175100001440000000455712117363140015002 0ustar hornikusers\name{kpca-class} \docType{class} \alias{kpca-class} \alias{rotated} \alias{eig,kpca-method} \alias{kcall,kpca-method} \alias{kernelf,kpca-method} \alias{pcv,kpca-method} \alias{rotated,kpca-method} \alias{xmatrix,kpca-method} \title{Class "kpca"} \description{ The Kernel Principal Components Analysis class} \section{Objects of class "kpca"}{ Objects can be created by calls of the form \code{new("kpca", ...)}. or by calling the \code{kpca} function.
} \section{Slots}{ \describe{ \item{\code{pcv}:}{Object of class \code{"matrix"} containing the principal component vectors } \item{\code{eig}:}{Object of class \code{"vector"} containing the corresponding eigenvalues} \item{\code{rotated}:}{Object of class \code{"matrix"} containing the projection of the data on the principal components} \item{\code{kernelf}:}{Object of class \code{"function"} containing the kernel function used} \item{\code{kpar}:}{Object of class \code{"list"} containing the kernel parameters used } \item{\code{xmatrix}:}{Object of class \code{"matrix"} containing the data matrix used } \item{\code{kcall}:}{Object of class \code{"ANY"} containing the function call } \item{\code{n.action}:}{Object of class \code{"ANY"} containing the action performed on NA } } } \section{Methods}{ \describe{ \item{eig}{\code{signature(object = "kpca")}: returns the eigenvalues } \item{kcall}{\code{signature(object = "kpca")}: returns the performed call} \item{kernelf}{\code{signature(object = "kpca")}: returns the used kernel function} \item{pcv}{\code{signature(object = "kpca")}: returns the principal component vectors } \item{predict}{\code{signature(object = "kpca")}: embeds new data } \item{rotated}{\code{signature(object = "kpca")}: returns the projected data} \item{xmatrix}{\code{signature(object = "kpca")}: returns the used data matrix } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ksvm-class}}, \code{\link{kcca-class}} } \examples{ # another example using the iris data(iris) test <- sample(1:50,20) kpc <- kpca(~.,data=iris[-test,-5],kernel="rbfdot", kpar=list(sigma=0.2),features=2) #print the principal component vectors pcv(kpc) rotated(kpc) kernelf(kpc) eig(kpc) } \keyword{classes} kernlab/man/ipop-class.Rd0000644000175100001440000000313311304023134015011 0ustar hornikusers\name{ipop-class} \docType{class} \alias{ipop-class} \alias{primal,ipop-method} \alias{dual,ipop-method} 
\alias{how,ipop-method} \alias{primal} \alias{dual} \alias{how} \title{Class "ipop"} \description{The quadratic problem solver class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("ipop", ...)}. or by calling the \code{ipop} function. } \section{Slots}{ \describe{ \item{\code{primal}:}{Object of class \code{"vector"} the primal solution of the problem} \item{\code{dual}:}{Object of class \code{"numeric"} the dual of the problem} \item{\code{how}:}{Object of class \code{"character"} convergence information} } } \section{Methods}{ \describe{ \item{primal}{Object of class \code{ipop}}{Return the primal of the problem} \item{dual}{Object of class \code{ipop}}{Return the dual of the problem} \item{how}{Object of class \code{ipop}}{Return information on convergence} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{ipop}} } \examples{ ## solve the Support Vector Machine optimization problem data(spam) ## sample a scaled part (300 points) of the spam data set m <- 300 set <- sample(1:dim(spam)[1],m) x <- scale(as.matrix(spam[,-58]))[set,] y <- as.integer(spam[set,58]) y[y==2] <- -1 ##set C parameter and kernel C <- 5 rbf <- rbfdot(sigma = 0.1) ## create H matrix etc. H <- kernelPol(rbf,x,,y) c <- matrix(rep(-1,m)) A <- t(y) b <- 0 l <- matrix(rep(0,m)) u <- matrix(rep(C,m)) r <- 0 sv <- ipop(c,H,A,b,l,u,r) primal(sv) dual(sv) how(sv) } \keyword{classes} kernlab/man/reuters.Rd0000644000175100001440000000111711304023134014430 0ustar hornikusers\name{reuters} \alias{reuters} \alias{rlabels} \title{Reuters Text Data} \description{A small sample from the Reuters news data set.} \usage{data(reuters)} \format{ A list of 40 text documents along with the labels. \code{reuters} contains the text documents and \code{rlabels} the labels in a vector. } \details{ This dataset contains a list of 40 text documents along with the labels. 
The data consist of 20 documents from the \code{acq} category and 20 documents from the \code{crude} category. The labels are stored in \code{rlabels}. } \source{Reuters} \keyword{datasets} kernlab/man/inchol-class.Rd0000644000175100001440000000315211304023134015321 0ustar hornikusers\name{inchol-class} \docType{class} \alias{inchol-class} \alias{diagresidues} \alias{maxresiduals} \alias{pivots} \alias{diagresidues,inchol-method} \alias{maxresiduals,inchol-method} \alias{pivots,inchol-method} \title{Class "inchol" } \description{The reduced Cholesky decomposition object} \section{Objects from the Class}{Objects can be created by calls of the form \code{new("inchol", ...)}. or by calling the \code{inchol} function.} \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"matrix"} contains the decomposed matrix} \item{\code{pivots}:}{Object of class \code{"vector"} contains the pivots performed} \item{\code{diagresidues}:}{Object of class \code{"vector"} contains the diagonal residues} \item{\code{maxresiduals}:}{Object of class \code{"vector"} contains the maximum residues} } } \section{Extends}{ Class \code{"matrix"}, directly.
} \section{Methods}{ \describe{ \item{diagresidues}{\code{signature(object = "inchol")}: returns the diagonal residues} \item{maxresiduals}{\code{signature(object = "inchol")}: returns the maximum residues} \item{pivots}{\code{signature(object = "inchol")}: returns the pivots performed} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{inchol}}, \code{\link{csi-class}}, \code{\link{csi}}} \examples{ data(iris) datamatrix <- as.matrix(iris[,-5]) # initialize kernel function rbf <- rbfdot(sigma=0.1) rbf Z <- inchol(datamatrix,kernel=rbf) dim(Z) pivots(Z) diagresidues(Z) maxresiduals(Z) } \keyword{classes} kernlab/man/kcca.Rd0000644000175100001440000000715511304023134013650 0ustar hornikusers\name{kcca} \alias{kcca} \alias{kcca,matrix-method} \title{Kernel Canonical Correlation Analysis} \description{ Computes the canonical correlation analysis in feature space. } \usage{ \S4method{kcca}{matrix}(x, y, kernel="rbfdot", kpar=list(sigma=0.1), gamma = 0.1, ncomps = 10, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{a matrix containing data indexed by row} \item{y}{a matrix containing data indexed by row} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes an inner product in feature space between two vector arguments.
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{gamma}{regularization parameter (default : 0.1)} \item{ncomps}{number of canonical components (default : 10) } \item{\dots}{additional parameters for the \code{kpca} function} } \details{ The kernel version of canonical correlation analysis. Kernel Canonical Correlation Analysis (KCCA) is a non-linear extension of CCA. Given two random variables, KCCA aims at extracting the information which is shared by the two random variables. More precisely given \eqn{x} and \eqn{y} the purpose of KCCA is to provide nonlinear mappings \eqn{f(x)} and \eqn{g(y)} such that their correlation is maximized. 
} \value{ An S4 object containing the following slots: \item{kcor}{Correlation coefficients in feature space} \item{xcoef}{estimated coefficients for the \code{x} variables in the feature space} \item{ycoef}{estimated coefficients for the \code{y} variables in the feature space} %% \item{xvar}{The canonical variates for \code{x}} %% \item{yvar}{The canonical variates for \code{y}} } \references{ Malte Kuss, Thore Graepel \cr \emph{The Geometry Of Kernel Canonical Correlation Analysis}\cr \url{http://www.kyb.tuebingen.mpg.de/publications/pdfs/pdf2233.pdf}} \author{ Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{cancor}}, \code{\link{kpca}}, \code{\link{kfa}}, \code{\link{kha}}} \examples{ ## dummy data x <- matrix(rnorm(30),15) y <- matrix(rnorm(30),15) kcca(x,y,ncomps=2) } \keyword{multivariate} kernlab/man/kcca-class.Rd0000644000175100001440000000345511304023134014752 0ustar hornikusers\name{kcca-class} \docType{class} \alias{kcca-class} \alias{kcor} \alias{xcoef} \alias{ycoef} %%\alias{yvar} %%\alias{xvar} \alias{kcor,kcca-method} \alias{xcoef,kcca-method} \alias{xvar,kcca-method} \alias{ycoef,kcca-method} \alias{yvar,kcca-method} \title{Class "kcca"} \description{The "kcca" class } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("kcca", ...)}. or by the calling the \code{kcca} function. 
} \section{Slots}{ \describe{ \item{\code{kcor}:}{Object of class \code{"vector"} describing the correlations} \item{\code{xcoef}:}{Object of class \code{"matrix"} estimated coefficients for the \code{x} variables} \item{\code{ycoef}:}{Object of class \code{"matrix"} estimated coefficients for the \code{y} variables } %% \item{\code{xvar}:}{Object of class \code{"matrix"} holds the %% canonical variates for \code{x}} %% \item{\code{yvar}:}{Object of class \code{"matrix"} holds the %% canonical variates for \code{y}} } } \section{Methods}{ \describe{ \item{kcor}{\code{signature(object = "kcca")}: returns the correlations} \item{xcoef}{\code{signature(object = "kcca")}: returns the estimated coefficients for the \code{x} variables} \item{ycoef}{\code{signature(object = "kcca")}: returns the estimated coefficients for the \code{y} variables } %% \item{xvar}{\code{signature(object = "kcca")}: returns the canonical %% variates for \code{x}} %% \item{yvar}{\code{signature(object = "kcca")}: returns the canonical %% variates for \code{y}} } } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{kcca}}, \code{\link{kpca-class}} } \examples{ ## dummy data x <- matrix(rnorm(30),15) y <- matrix(rnorm(30),15) kcca(x,y,ncomps=2) } \keyword{classes} kernlab/man/ipop.Rd0000644000175100001440000000531712560414652013732 0ustar hornikusers\name{ipop} \alias{ipop} \alias{ipop,ANY,matrix-method} \title{Quadratic Programming Solver} \description{ ipop solves the quadratic programming problem :\cr \eqn{\min(c'*x + 1/2 * x' * H * x)}\cr subject to: \cr \eqn{b <= A * x <= b + r}\cr \eqn{l <= x <= u} } \usage{ ipop(c, H, A, b, l, u, r, sigf = 7, maxiter = 40, margin = 0.05, bound = 10, verb = 0) } \arguments{ \item{c}{Vector or one column matrix appearing in the quadratic function} \item{H}{square matrix appearing in the quadratic function, or the decomposed form \eqn{Z} of the \eqn{H} matrix where \eqn{Z} is a \eqn{n x m} matrix with 
\eqn{n > m} and \eqn{ZZ' = H}.} \item{A}{Matrix defining the constraints under which we minimize the quadratic function} \item{b}{Vector or one column matrix defining the constraints} \item{l}{Lower bound vector or one column matrix} \item{u}{Upper bound vector or one column matrix} \item{r}{Vector or one column matrix defining constraints} \item{sigf}{Precision (default: 7 significant figures)} \item{maxiter}{Maximum number of iterations} \item{margin}{how close we get to the constraints} \item{bound}{Clipping bound for the variables} \item{verb}{Display convergence information during runtime} } \details{ ipop uses an interior point method to solve the quadratic programming problem. \cr The \eqn{H} matrix can also be provided in the decomposed form \eqn{Z} where \eqn{ZZ' = H} in that case the Sherman Morrison Woodbury formula is used internally. } \value{ An S4 object with the following slots \item{primal}{Vector containing the primal solution of the quadratic problem} \item{dual}{The dual solution of the problem} \item{how}{Character string describing the type of convergence} all slots can be accessed through accessor functions (see example) } \references{ R. J. Vanderbei\cr \emph{LOQO: An interior point code for quadratic programming}\cr Optimization Methods and Software 11, 451-484, 1999 \cr \url{http://www.princeton.edu/~rvdb/ps/loqo5.pdf} } \author{Alexandros Karatzoglou (based on Matlab code by Alex Smola) \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{solve.QP}, \code{\link{inchol}}, \code{\link{csi}}} \examples{ ## solve the Support Vector Machine optimization problem data(spam) ## sample a scaled part (500 points) of the spam data set m <- 500 set <- sample(1:dim(spam)[1],m) x <- scale(as.matrix(spam[,-58]))[set,] y <- as.integer(spam[set,58]) y[y==2] <- -1 ##set C parameter and kernel C <- 5 rbf <- rbfdot(sigma = 0.1) ## create H matrix etc.
H <- kernelPol(rbf,x,,y) c <- matrix(rep(-1,m)) A <- t(y) b <- 0 l <- matrix(rep(0,m)) u <- matrix(rep(C,m)) r <- 0 sv <- ipop(c,H,A,b,l,u,r) sv dual(sv) } \keyword{optimize} kernlab/man/lssvm.Rd0000644000175100001440000002010212117365064014114 0ustar hornikusers\name{lssvm} \docType{methods} \alias{lssvm} \alias{lssvm-methods} \alias{lssvm,formula-method} \alias{lssvm,vector-method} \alias{lssvm,matrix-method} \alias{lssvm,list-method} \alias{lssvm,kernelMatrix-method} \alias{show,lssvm-method} \alias{coef,lssvm-method} \alias{predict,lssvm-method} \title{Least Squares Support Vector Machine} \description{ The \code{lssvm} function is an implementation of the Least Squares SVM. \code{lssvm} includes a reduced version of Least Squares SVM using a decomposition of the kernel matrix which is calculated by the \code{csi} function. } \usage{ \S4method{lssvm}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE) \S4method{lssvm}{vector}(x, ...) \S4method{lssvm}{matrix}(x, y, scaled = TRUE, kernel = "rbfdot", kpar = "automatic", type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ..., subset, na.action = na.omit) \S4method{lssvm}{kernelMatrix}(x, y, type = NULL, tau = 0.01, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ...) \S4method{lssvm}{list}(x, y, scaled = TRUE, kernel = "stringdot", kpar = list(length=4, lambda = 0.5), type = NULL, tau = 0.01, reduced = TRUE, tol = 0.0001, rank = floor(dim(x)[1]/3), delta = 40, cross = 0, fit = TRUE, ..., subset) } \arguments{ \item{x}{a symbolic description of the model to be fit, a matrix or vector containing the training data when a formula interface is not used or a \code{kernelMatrix} or a list of character vectors.} \item{data}{an optional data frame containing the variables in the model. 
By default the variables are taken from the environment which `lssvm' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for classification or regression - currently not supported -).} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally to zero mean and unit variance. The center and scale values are returned and used for later predictions.} \item{type}{Type of problem. Either "classification" or "regression". Depending on whether \code{y} is a factor or not, the default setting for \code{type} is "classification" or "regression" respectively, but can be overwritten by setting an explicit value. (regression is currently not supported)\cr} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel "Gaussian" \item \code{polydot} Polynomial kernel \item \code{vanilladot} Linear kernel \item \code{tanhdot} Hyperbolic tangent kernel \item \code{laplacedot} Laplacian kernel \item \code{besseldot} Bessel kernel \item \code{anovadot} ANOVA RBF kernel \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } Setting the kernel parameter to "matrix" treats \code{x} as a kernel matrix calling the \code{kernelMatrix} interface.\cr The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{ the list of hyper-parameters (kernel parameters).
This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are: \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. } Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.\cr \code{kpar} can also be set to the string "automatic" which uses the heuristics in \code{\link{sigest}} to calculate a good \code{sigma} value for the Gaussian RBF or Laplace kernel, from the data. (default = "automatic").
} \item{tau}{the regularization parameter (default 0.01) } \item{reduced}{if set to \code{FALSE} the full linear problem of the lssvm is solved, when \code{TRUE} a reduced method using \code{csi} is used.} \item{rank}{the maximal rank of the decomposed kernel matrix, see \code{csi}} \item{delta}{number of columns of cholesky performed in advance, see \code{csi} (default 40)} \item{tol}{tolerance of termination criterion for the \code{csi} function, lower tolerance leads to more precise approximation but may increase the training time and the decomposed matrix size (default: 0.0001)} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: 'TRUE')} \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Mean Squared Error for regression} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{Least Squares Support Vector Machines are reformulations of the standard SVMs that lead to solving linear KKT systems. The algorithm is based on the minimization of a classical penalized least-squares cost function. The current implementation approximates the kernel matrix by an incomplete Cholesky factorization obtained by the \code{\link{csi}} function, thus the solution is an approximation to the exact solution of the lssvm optimization problem. The quality of the solution depends on the approximation and can be influenced by the "rank" , "delta", and "tol" parameters.
} \value{ An S4 object of class \code{"lssvm"} containing the fitted model, Accessor functions can be used to access the slots of the object (see examples) which include: \item{alpha}{the parameters of the \code{"lssvm"}} \item{coef}{the model coefficients (identical to alpha)} \item{b}{the model offset.} \item{xmatrix}{the training data used by the model} } \references{ J. A. K. Suykens and J. Vandewalle\cr \emph{Least Squares Support Vector Machine Classifiers}\cr Neural Processing Letters vol. 9, issue 3, June 1999\cr } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{ksvm}}, \code{\link{gausspr}}, \code{\link{csi}} } \examples{ ## simple example data(iris) lir <- lssvm(Species~.,data=iris) lir lirr <- lssvm(Species~.,data= iris, reduced = FALSE) lirr ## Using the kernelMatrix interface iris <- unique(iris) rbf <- rbfdot(0.5) k <- kernelMatrix(rbf, as.matrix(iris[,-5])) klir <- lssvm(k, iris[, 5]) klir pre <- predict(klir, k) } \keyword{classif} \keyword{nonlinear} \keyword{methods} kernlab/man/sigest.Rd0000644000175100001440000000631712117366220014255 0ustar hornikusers\name{sigest} \alias{sigest} \alias{sigest,formula-method} \alias{sigest,matrix-method} \title{Hyperparameter estimation for the Gaussian Radial Basis kernel} \description{ Given a range of values for the "sigma" inverse width parameter in the Gaussian Radial Basis kernel for use with Support Vector Machines. The estimation is based on the data to be used. } \usage{ \S4method{sigest}{formula}(x, data=NULL, frac = 0.5, na.action = na.omit, scaled = TRUE) \S4method{sigest}{matrix}(x, frac = 0.5, scaled = TRUE, na.action = na.omit) } \arguments{ \item{x}{a symbolic description of the model upon the estimation is based. When not using a formula x is a matrix or vector containing the data} \item{data}{an optional data frame containing the variables in the model. 
By default the variables are taken from the environment which `ksvm' is called from.} \item{frac}{Fraction of data to use for estimation. By default half of the data is used to estimate the range of the sigma hyperparameter.} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. Per default, data are scaled internally to zero mean and unit variance (since this is the default action in \code{ksvm} as well). The center and scale values are returned and used for later predictions. } \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} } \details{ \code{sigest} estimates the range of values for the sigma parameter which would return good results when used with a Support Vector Machine (\code{ksvm}). The estimation is based upon the 0.1 and 0.9 quantile of \eqn{\|x -x'\|^2}. Basically any value in between those two bounds will produce good results. } \value{ Returns a vector of length 3 defining the range (0.1 quantile, median and 0.9 quantile) of the sigma hyperparameter. } \references{ B. Caputo, K. Sim, F. Furesjo, A. Smola, \cr \emph{Appearance-based object recognition using SVMs: which kernel should I use?}\cr Proc of NIPS workshop on Statistical methods for computational experiments in visual processing and computer vision, Whistler, 2002.
} \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{ksvm}}} \examples{ ## estimate good sigma values for promotergene data(promotergene) srange <- sigest(Class~.,data = promotergene) srange s <- srange[2] s ## create test and training set ind <- sample(1:dim(promotergene)[1],20) genetrain <- promotergene[-ind, ] genetest <- promotergene[ind, ] ## train a support vector machine gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot", kpar=list(sigma = s),C=50,cross=3) gene ## predict gene type on the test set promoter <- predict(gene,genetest[,-1]) ## Check results table(promoter,genetest[,1]) } \keyword{classif} \keyword{regression} kernlab/man/plot.Rd0000644000175100001440000000216511304023134013721 0ustar hornikusers\name{plot} \alias{plot.ksvm} \alias{plot,ksvm,missing-method} \alias{plot,ksvm-method} \title{plot method for support vector object} \description{Plot a binary classification support vector machine object. The \code{plot} function returns a contour plot of the decision values. } \usage{ \S4method{plot}{ksvm}(object, data=NULL, grid = 50, slice = list()) } \arguments{ \item{object}{a \code{ksvm} classification object created by the \code{ksvm} function} \item{data}{a data frame or matrix containing data to be plotted} \item{grid}{granularity for the contour plot.} \item{slice}{a list of named numeric values for the dimensions held constant (only needed if more than two variables are used). Dimensions not specified are fixed at 0. 
} } \seealso{\code{\link{ksvm}}} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \keyword{methods} \keyword{regression} \keyword{classif} \examples{ ## Demo of the plot function x <- rbind(matrix(rnorm(120),,2),matrix(rnorm(120,mean=3),,2)) y <- matrix(c(rep(1,60),rep(-1,60))) svp <- ksvm(x,y,type="C-svc") plot(svp,data=x) } kernlab/man/specc-class.Rd0000644000175100001440000000315311304023134015141 0ustar hornikusers\name{specc-class} \docType{class} \alias{specc-class} \alias{centers} \alias{size} \alias{withinss} \alias{centers,specc-method} \alias{withinss,specc-method} \alias{size,specc-method} \alias{kernelf,specc-method} \title{Class "specc"} \description{ The Spectral Clustering Class} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("specc", ...)}. or by calling the function \code{specc}. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Object of class \code{"vector"} containing the cluster assignments} \item{\code{centers}:}{Object of class \code{"matrix"} containing the cluster centers} \item{\code{size}:}{Object of class \code{"vector"} containing the number of points in each cluster} \item{\code{withinss}:}{Object of class \code{"vector"} containing the within-cluster sum of squares for each cluster} \item{\code{kernelf}}{Object of class \code{kernel} containing the used kernel function.} } } \section{Methods}{ \describe{ \item{centers}{\code{signature(object = "specc")}: returns the cluster centers} \item{withinss}{\code{signature(object = "specc")}: returns the within-cluster sum of squares for each cluster} \item{size}{\code{signature(object = "specc")}: returns the number of points in each cluster } } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{ \code{\link{specc}}, \code{\link{kpca-class}} } \examples{ ## Cluster the spirals data set. 
data(spirals) sc <- specc(spirals, centers=2) centers(sc) size(sc) } \keyword{classes} kernlab/man/gausspr.Rd0000644000175100001440000001661412560371302014443 0ustar hornikusers\name{gausspr} \alias{gausspr} \alias{gausspr,formula-method} \alias{gausspr,vector-method} \alias{gausspr,matrix-method} \alias{coef,gausspr-method} \alias{show,gausspr-method} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Gaussian processes for regression and classification} \description{ \code{gausspr} is an implementation of Gaussian processes for classification and regression. } \usage{ \S4method{gausspr}{formula}(x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE) \S4method{gausspr}{vector}(x,...) \S4method{gausspr}{matrix}(x, y, scaled = TRUE, type= NULL, kernel="rbfdot", kpar="automatic", var=1, variance.model = FALSE, tol=0.0005, cross=0, fit=TRUE, ... , subset, na.action = na.omit) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{a symbolic description of the model to be fit or a matrix or vector when a formula interface is not used. When not using a formula x is a matrix or vector containing the variables in the model} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `gausspr' is called from.} \item{y}{a response vector with one label for each row/component of \code{x}. Can be either a factor (for classification tasks) or a numeric vector (for regression).} \item{type}{Type of problem. Either "classification" or "regression". Depending on whether \code{y} is a factor or not, the default setting for \code{type} is \code{classification} or \code{regression}, respectively, but can be overwritten by setting an explicit value.\cr} \item{scaled}{A logical vector indicating the variables to be scaled. If \code{scaled} is of length 1, the value is recycled as many times as needed and all non-binary variables are scaled. 
Per default, data are scaled internally (both \code{x} and \code{y} variables) to zero mean and unit variance. The center and scale values are returned and used for later predictions.} \item{kernel}{the kernel function used in training and predicting. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{the list of hyper-parameters (kernel parameters). This is a list which contains the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". 
} Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{var}{the initial noise variance, (only for regression) (default : 1)} \item{variance.model}{build model for variance or standard deviation estimation (only for regression) (default : FALSE)} \item{tol}{tolerance of termination criterion (default: 0.0005)} \item{fit}{indicates whether the fitted values should be computed and included in the model or not (default: 'TRUE')} \item{cross}{if an integer value k>0 is specified, a k-fold cross validation on the training data is performed to assess the quality of the model: the Mean Squared Error for regression} \item{subset}{An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.)} \item{na.action}{A function to specify the action to be taken if \code{NA}s are found. The default action is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. An alternative is \code{na.fail}, which causes an error if \code{NA} cases are found. (NOTE: If given, this argument must be named.)} \item{\dots}{ additional parameters} } \details{ A Gaussian process is specified by a mean and a covariance function. The mean is a function of \eqn{x} (which is often the zero function), and the covariance is a function \eqn{C(x,x')} which expresses the expected covariance between the value of the function \eqn{y} at the points \eqn{x} and \eqn{x'}. The actual function \eqn{y(x)} in any data modeling problem is assumed to be a single sample from this Gaussian distribution. Laplace approximation is used for the parameter estimation in Gaussian processes for classification.\cr The predict function can return class probabilities for classification problems by setting the \code{type} parameter to "probabilities". 
For the regression setting the \code{type} parameter to "variance" or "sdeviation" returns the estimated variance or standard deviation at each predicted point. } \value{ An S4 object of class "gausspr" containing the fitted model along with information. Accessor functions can be used to access the slots of the object which include : \item{alpha}{The resulting model parameters} \item{error}{Training error (if fit == TRUE)} } \references{ C. K. I. Williams and D. Barber \cr Bayesian classification with Gaussian processes. \cr IEEE Transactions on Pattern Analysis and Machine Intelligence, 20(12):1342-1351, 1998\cr \url{http://www.dai.ed.ac.uk/homes/ckiw/postscript/pami_final.ps.gz} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \seealso{\code{\link{predict.gausspr}}, \code{\link{rvm}}, \code{\link{ksvm}}, \code{\link{gausspr-class}}, \code{\link{lssvm}} } \examples{ # train model data(iris) test <- gausspr(Species~.,data=iris,var=2) test alpha(test) # predict on the training set predict(test,iris[,-5]) # class probabilities predict(test, iris[,-5], type="probabilities") # create regression data x <- seq(-20,20,0.1) y <- sin(x)/x + rnorm(401,sd=0.03) # regression with gaussian processes foo <- gausspr(x, y) foo # predict and plot ytest <- predict(foo, x) plot(x, y, type ="l") lines(x, ytest, col="red") #predict and variance x = c(-4, -3, -2, -1, 0, 0.5, 1, 2) y = c(-2, 0, -0.5,1, 2, 1, 0, -1) plot(x,y) foo2 <- gausspr(x, y, variance.model = TRUE) xtest <- seq(-4,2,0.2) lines(xtest, predict(foo2, xtest)) lines(xtest, predict(foo2, xtest)+2*predict(foo2,xtest, type="sdeviation"), col="red") lines(xtest, predict(foo2, xtest)-2*predict(foo2,xtest, type="sdeviation"), col="red") } \keyword{classif} \keyword{regression} \keyword{nonlinear} \keyword{methods} kernlab/man/income.Rd0000644000175100001440000000370611304023134014217 0ustar hornikusers\name{income} \alias{income} \title{Income Data} \description{ Customer Income Data from a 
marketing survey. } \usage{data(income)} \format{ A data frame with 14 categorical variables (8993 observations). Explanation of the variable names: \tabular{rllll}{ \tab 1 \tab \code{INCOME} \tab annual income of household \tab \cr \tab \tab \tab (Personal income if single) \tab ordinal\cr \tab 2 \tab \code{SEX} \tab sex \tab nominal\cr \tab 3 \tab \code{MARITAL.STATUS} \tab marital status \tab nominal\cr \tab 4 \tab \code{AGE} \tab age \tab ordinal\cr \tab 5 \tab \code{EDUCATION} \tab educational grade \tab ordinal\cr \tab 6 \tab \code{OCCUPATION} \tab type of work \tab nominal \cr \tab 7 \tab \code{AREA} \tab how long the interviewed person has lived\tab \cr \tab \tab \tab in the San Francisco/Oakland/San Jose area \tab ordinal\cr \tab 8 \tab \code{DUAL.INCOMES} \tab dual incomes (if married) \tab nominal\cr \tab 9 \tab \code{HOUSEHOLD.SIZE} \tab persons living in the household \tab ordinal\cr \tab 10 \tab \code{UNDER18} \tab persons in household under 18 \tab ordinal\cr \tab 11 \tab \code{HOUSEHOLDER} \tab householder status \tab nominal\cr \tab 12 \tab \code{HOME.TYPE} \tab type of home \tab nominal\cr \tab 13 \tab \code{ETHNIC.CLASS} \tab ethnic classification \tab nominal\cr \tab 14 \tab \code{LANGUAGE} \tab language most often spoken at home \tab nominal\cr } } \details{ A total of N=9409 questionnaires containing 502 questions were filled out by shopping mall customers in the San Francisco Bay area. The dataset is an extract from this survey. It consists of 14 demographic attributes. The dataset is a mixture of nominal and ordinal variables with a lot of missing data. The goal is to predict the Annual Income of Household from the other 13 demographic attributes. } \source{ Impact Resources, Inc., Columbus, OH (1987). 
} \keyword{datasets} kernlab/man/specc.Rd0000644000175100001440000001420712560414652014056 0ustar hornikusers\name{specc} \alias{specc} \alias{specc,matrix-method} \alias{specc,formula-method} \alias{specc,list-method} \alias{specc,kernelMatrix-method} \alias{show,specc-method} \title{Spectral Clustering} \description{ A spectral clustering algorithm. Clustering is performed by embedding the data into the subspace of the eigenvectors of an affinity matrix. } \usage{ \S4method{specc}{formula}(x, data = NULL, na.action = na.omit, ...) \S4method{specc}{matrix}(x, centers, kernel = "rbfdot", kpar = "automatic", nystrom.red = FALSE, nystrom.sample = dim(x)[1]/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) \S4method{specc}{kernelMatrix}(x, centers, nystrom.red = FALSE, iterations = 200, ...) \S4method{specc}{list}(x, centers, kernel = "stringdot", kpar = list(length=4, lambda=0.5), nystrom.red = FALSE, nystrom.sample = length(x)/6, iterations = 200, mod.sample = 0.75, na.action = na.omit, ...) } \arguments{ \item{x}{the matrix of data to be clustered, or a symbolic description of the model to be fit, or a kernel Matrix of class \code{kernelMatrix}, or a list of character vectors.} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from the environment which `specc' is called from.} \item{centers}{Either the number of clusters or a set of initial cluster centers. If the first, a random set of rows in the eigenvectors matrix are chosen as the initial centers.} \item{kernel}{the kernel function used in computing the affinity matrix. This parameter can be set to any function, of class kernel, which computes a dot product between two vector arguments. 
kernlab provides the most popular kernel functions which can be used by setting the kernel parameter to the following strings: \itemize{ \item \code{rbfdot} Radial Basis kernel function "Gaussian" \item \code{polydot} Polynomial kernel function \item \code{vanilladot} Linear kernel function \item \code{tanhdot} Hyperbolic tangent kernel function \item \code{laplacedot} Laplacian kernel function \item \code{besseldot} Bessel kernel function \item \code{anovadot} ANOVA RBF kernel function \item \code{splinedot} Spline kernel \item \code{stringdot} String kernel } The kernel parameter can also be set to a user defined function of class kernel by passing the function name as an argument. } \item{kpar}{a character string or the list of hyper-parameters (kernel parameters). The default character string \code{"automatic"} uses a heuristic to determine a suitable value for the width parameter of the RBF kernel. The second option \code{"local"} (local scaling) uses a more advanced heuristic and sets a width parameter for every point in the data set. This is particularly useful when the data incorporates multiple scales. A list can also be used containing the parameters to be used with the kernel function. Valid parameters for existing kernels are : \itemize{ \item \code{sigma} inverse kernel width for the Radial Basis kernel function "rbfdot" and the Laplacian kernel "laplacedot". \item \code{degree, scale, offset} for the Polynomial kernel "polydot" \item \code{scale, offset} for the Hyperbolic tangent kernel function "tanhdot" \item \code{sigma, order, degree} for the Bessel kernel "besseldot". \item \code{sigma, degree} for the ANOVA kernel "anovadot". \item \code{length, lambda, normalized} for the "stringdot" kernel where length is the length of the strings considered, lambda the decay factor and normalized a logical parameter determining if the kernel evaluations should be normalized. 
} Hyper-parameters for user defined kernels can be passed through the kpar parameter as well.} \item{nystrom.red}{use nystrom method to calculate eigenvectors. When \code{TRUE} a sample of the dataset is used to calculate the eigenvalues, thus only a \eqn{n x m} matrix where \eqn{n} is the sample size is stored in memory (default: \code{FALSE})} \item{nystrom.sample}{number of data points to use for estimating the eigenvalues when using the nystrom method. (default : dim(x)[1]/6)} \item{mod.sample}{proportion of data to use when estimating sigma (default: 0.75)} \item{iterations}{the maximum number of iterations allowed. } \item{na.action}{the action to perform on NA} \item{\dots}{additional parameters} } \details{ Spectral clustering works by embedding the data points of the partitioning problem into the subspace of the \eqn{k} largest eigenvectors of a normalized affinity/kernel matrix. Using a simple clustering method like \code{kmeans} on the embedded points usually leads to good performance. It can be shown that spectral clustering methods boil down to graph partitioning.\cr The data can be passed to the \code{specc} function in a \code{matrix} or a \code{data.frame}, in addition \code{specc} also supports input in the form of a kernel matrix of class \code{kernelMatrix} or as a list of character vectors where a string kernel has to be used.} \value{ An S4 object of class \code{specc} which extends the class \code{vector} containing integers indicating the cluster to which each point is allocated. The following slots contain useful information \item{centers}{A matrix of cluster centers.} \item{size}{The number of points in each cluster} \item{withinss}{The within-cluster sum of squares for each cluster} \item{kernelf}{The kernel function used} } \references{ Andrew Y. Ng, Michael I. 
Jordan, Yair Weiss\cr \emph{On Spectral Clustering: Analysis and an Algorithm}\cr Neural Information Processing Symposium 2001\cr \url{http://papers.nips.cc/paper/2092-on-spectral-clustering-analysis-and-an-algorithm.pdf} } \author{Alexandros Karatzoglou \cr \email{alexandros.karatzoglou@ci.tuwien.ac.at} } \seealso{\code{\link{kkmeans}}, \code{\link{kpca}}, \code{\link{kcca}} } \examples{ ## Cluster the spirals data set. data(spirals) sc <- specc(spirals, centers=2) sc centers(sc) size(sc) withinss(sc) plot(spirals, col=sc) } \keyword{cluster} kernlab/man/predict.ksvm.Rd0000644000175100001440000000511412560430652015365 0ustar hornikusers\name{predict.ksvm} \alias{predict.ksvm} \alias{predict,ksvm-method} \title{predict method for support vector object} \description{Prediction of test data using support vector machines} \usage{ \S4method{predict}{ksvm}(object, newdata, type = "response", coupler = "minpair") } \arguments{ \item{object}{an S4 object of class \code{ksvm} created by the \code{ksvm} function} \item{newdata}{a data frame or matrix containing new data} \item{type}{one of \code{response}, \code{probabilities} ,\code{votes}, \code{decision} indicating the type of output: predicted values, matrix of class probabilities, matrix of vote counts, or matrix of decision values.} \item{coupler}{Coupling method used in the multiclass case, can be one of \code{minpair} or \code{pkpd} (see reference for more details).} } \value{ If \code{type(object)} is \code{C-svc}, \code{nu-svc}, \code{C-bsvm} or \code{spoc-svc} the vector returned depends on the argument \code{type}: \item{response}{predicted classes (the classes with majority vote).} \item{probabilities}{matrix of class probabilities (one column for each class and one row for each input).} \item{votes}{matrix of vote counts (one column for each class and one row for each new input)} If \code{type(object)} is \code{eps-svr}, \code{eps-bsvr} or \code{nu-svr} a vector of predicted values is returned. 
If \code{type(object)} is \code{one-classification} a vector of logical values is returned. } \references{ \itemize{ \item T.F. Wu, C.J. Lin, R.C. Weng. \cr \emph{Probability estimates for Multi-class Classification by Pairwise Coupling}\cr \url{http://www.csie.ntu.edu.tw/~cjlin/papers/svmprob/svmprob.pdf} \item H.T. Lin, C.J. Lin, R.C. Weng\cr \emph{A note on Platt's probabilistic outputs for support vector machines}\cr \url{http://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf} } } \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \keyword{methods} \keyword{regression} \keyword{classif} \examples{ ## example using the promotergene data set data(promotergene) ## create test and training set ind <- sample(1:dim(promotergene)[1],20) genetrain <- promotergene[-ind, ] genetest <- promotergene[ind, ] ## train a support vector machine gene <- ksvm(Class~.,data=genetrain,kernel="rbfdot", kpar=list(sigma=0.015),C=70,cross=4,prob.model=TRUE) gene ## predict gene type probabilities on the test set genetype <- predict(gene,genetest,type="probabilities") genetype } kernlab/man/predict.kqr.Rd0000644000175100001440000000214112117365174015203 0ustar hornikusers\name{predict.kqr} \alias{predict.kqr} \alias{predict,kqr-method} \title{Predict method for kernel Quantile Regression object} \description{Prediction of test data for kernel quantile regression} \usage{ \S4method{predict}{kqr}(object, newdata) } \arguments{ \item{object}{an S4 object of class \code{kqr} created by the \code{kqr} function} \item{newdata}{a data frame, matrix, or kernelMatrix containing new data} } \value{The value of the quantile given by the computed \code{kqr} model in a vector of length equal to the number of rows of \code{newdata}. 
} \author{Alexandros Karatzoglou\cr \email{alexandros.karatzoglou@ci.tuwien.ac.at}} \keyword{methods} \keyword{regression} \examples{ # create data x <- sort(runif(300)) y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x))) # first calculate the median qrm <- kqr(x, y, tau = 0.5, C=0.15) # predict and plot plot(x, y) ytest <- predict(qrm, x) lines(x, ytest, col="blue") # calculate 0.9 quantile qrm <- kqr(x, y, tau = 0.9, kernel = "rbfdot", kpar= list(sigma=10), C=0.15) ytest <- predict(qrm, x) lines(x, ytest, col="red") }